diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1633,10 +1633,12 @@
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 let HasMask = false, HasPolicy = false in {
-  defm vadc : RVVCarryinBuiltinSet;
+  let HasNoMaskPassThru = true in {
+    defm vadc : RVVCarryinBuiltinSet;
+    defm vsbc : RVVCarryinBuiltinSet;
+  }
   defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
   defm vmadc : RVVIntMaskOutBuiltinSet;
-  defm vsbc : RVVCarryinBuiltinSet;
   defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">;
   defm vmsbc : RVVIntMaskOutBuiltinSet;
 }
@@ -1742,6 +1744,8 @@
   ManualCodegen = [{
     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
     IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+    // insert undef passthru
+    Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
   }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
                                     [["vvm", "v", "vmvv"],
@@ -1876,6 +1880,8 @@
   ManualCodegen = [{
     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
     IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+    // insert undef passthru
+    Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
   }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
                                     [["vvm", "v", "vmvv"]]>;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
@@ -26,7 +26,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@
 
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
@@ -46,7
+46,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, @@ -66,7 +66,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, @@ -76,7 +76,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, @@ -86,7 +86,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, @@ -136,7 +136,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, @@ -146,7 +146,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, @@ -156,7 +156,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, @@ -166,7 +166,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, @@ -176,7 +176,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, @@ -226,7 +226,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, @@ -236,7 +236,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, @@ -256,7 +256,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, @@ -266,7 +266,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vadc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, @@ -406,7 +406,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, @@ -416,7 +416,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, @@ -426,7 +426,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, @@ -446,7 +446,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, @@ -496,7 +496,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, @@ -506,7 +506,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, @@ -526,7 +526,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, @@ -536,7 +536,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( undef, 
[[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -596,7 +596,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, @@ -606,7 +606,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -616,7 +616,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, @@ -626,7 +626,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -636,7 +636,7 @@ // 
CHECK-RV64-LABEL: @test_vadc_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -656,7 +656,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, @@ -686,7 +686,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -696,7 +696,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, @@ -706,7 +706,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -716,7 +716,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, @@ -746,7 +746,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, @@ -786,7 +786,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, @@ -796,7 +796,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, @@ -806,7 +806,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, @@ -826,7 +826,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, @@ -836,7 +836,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, @@ -856,7 +856,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, @@ -866,7 +866,7 @@ // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, @@ -876,7 +876,7 @@ // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, @@ 
-47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, @@ -47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, @@ -127,7 +127,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, @@ -157,7 +157,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, @@ -167,7 +167,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, @@ -247,7 +247,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, @@ -257,7 +257,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, @@ -317,7 +317,7 @@ // 
CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, @@ -397,7 +397,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, @@ -407,7 +407,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, @@ -417,7 +417,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, @@ -437,7 +437,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, @@ -487,7 +487,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, @@ -497,7 +497,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, @@ -507,7 +507,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, @@ -517,7 +517,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, @@ -527,7 +527,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t 
op1, vuint8m2_t op2, @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, @@ -577,7 +577,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, @@ -587,7 +587,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -617,7 +617,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, @@ -637,7 +637,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, @@ -647,7 +647,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, @@ -697,7 +697,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, @@ -707,7 +707,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -727,7 +727,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, @@ -737,7 +737,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, @@ 
-757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, @@ -767,7 +767,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, @@ -777,7 +777,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, @@ -797,7 +797,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, @@ -817,7 +817,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, @@ -827,7 +827,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, @@ -847,7 +847,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, @@ -857,7 +857,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, @@ -867,7 +867,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, @@ -887,7 +887,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
@@ -907,7 +907,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
@@ -917,7 +917,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
@@ -927,7 +927,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
@@ -937,7 +937,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
@@ -947,7 +947,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
@@ -957,7 +957,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
@@ -967,7 +967,7 @@
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
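The vsbc.c changes below follow the same mechanical pattern as the vmerge.c updates above: the overloaded builtin still takes no passthru argument at the C level, but the intrinsic call it lowers to now carries an explicit undef passthru as its first operand. A minimal sketch of the kind of source these tests compile (the wrapper name is hypothetical; it assumes riscv_vector.h and the overloaded vsbc spelling used throughout this file):

#include <riscv_vector.h>

// Hypothetical wrapper: whole-register subtract with borrow-in.
// After this patch the generated IR is
//   call @llvm.riscv.vsbc.<types>( undef, op1, op2, borrowin, vl)
// i.e. the passthru operand is explicit and undef, matching the
// updated CHECK lines below.
vint8mf8_t sub_with_borrow(vint8mf8_t op1, vint8mf8_t op2,
                           vbool64_t borrowin, size_t vl) {
  return vsbc(op1, op2, borrowin, vl);
}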
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
@@ -6,7 +6,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
@@ -26,7 +26,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
@@ -46,7 +46,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
@@ -66,7 +66,7 @@
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t
op2, vbool8_t borrowin, @@ -76,7 +76,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, @@ -86,7 +86,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, @@ -136,7 +136,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, @@ -146,7 +146,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, @@ -156,7 +156,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, @@ -166,7 +166,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, @@ -176,7 +176,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, @@ -226,7 +226,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, @@ -236,7 +236,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, @@ -256,7 +256,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, @@ -266,7 +266,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, @@ -296,7 +296,7 @@ // 
CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, @@ -406,7 +406,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, @@ -416,7 +416,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, @@ -426,7 +426,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, @@ -446,7 +446,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, @@ -496,7 +496,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, @@ -506,7 +506,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
@@ -526,7 +526,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
@@ -536,7 +536,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
@@ -546,7 +546,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
@@ -556,7 +556,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
@@ -566,7 +566,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
@@ -576,7 +576,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
@@ -586,7 +586,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
@@ -666,7 +666,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
@@ -686,7 +686,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
@@ -706,7 +706,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
@@ -786,7 +786,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
@@ -806,7 +806,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@
// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
@@ -16,7 +16,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
@@ -26,7 +26,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
@@ -36,7 +36,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
@@ -46,7 +46,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
@@ -56,7 +56,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
@@ -66,7 +66,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
@@ -76,7 +76,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
@@ -86,7 +86,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
@@ -96,7 +96,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
@@ -106,7 +106,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
@@ -116,7 +116,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
@@ -126,7 +126,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
@@ -136,7 +136,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
@@ -146,7 +146,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
@@ -156,7 +156,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
@@ -166,7 +166,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
@@ -176,7 +176,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
@@ -186,7 +186,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
@@ -196,7 +196,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
@@ -206,7 +206,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
@@ -216,7 +216,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
@@ -226,7 +226,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
@@ -236,7 +236,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
@@ -246,7 +246,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
@@ -256,7 +256,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
@@ -266,7 +266,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
@@ -276,7 +276,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
@@ -286,7 +286,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
@@ -296,7 +296,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
@@ -306,7 +306,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
@@ -316,7 +316,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
@@ -326,7 +326,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
@@ -336,7 +336,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
@@ -346,7 +346,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
@@ -356,7 +356,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
@@ -366,7 +366,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
@@ -376,7 +376,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
@@ -386,7 +386,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
@@ -396,7 +396,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
@@ -406,7 +406,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
@@ -416,7 +416,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
@@ -426,7 +426,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
@@ -436,7 +436,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
@@ -446,7 +446,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
@@ -456,7 +456,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
@@ -466,7 +466,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
@@ -476,7 +476,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
@@ -486,7 +486,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
@@ -496,7 +496,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
@@ -506,7 +506,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
@@ -516,7 +516,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
@@ -526,7 +526,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
@@ -536,7 +536,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
@@ -546,7 +546,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
@@ -556,7 +556,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
@@ -566,7 +566,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
@@ -576,7 +576,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
@@ -586,7 +586,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -596,7 +596,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
@@ -606,7 +606,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -616,7 +616,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
@@ -626,7 +626,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
@@ -636,7 +636,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
@@ -646,7 +646,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
@@ -656,7 +656,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
@@ -666,7 +666,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
@@ -676,7 +676,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
@@ -686,7 +686,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
@@ -696,7 +696,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
@@ -706,7 +706,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -716,7 +716,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
@@ -726,7 +726,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
@@ -736,7 +736,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
@@ -746,7 +746,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
@@ -756,7 +756,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
@@ -766,7 +766,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
@@ -776,7 +776,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
@@ -786,7 +786,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
@@ -796,7 +796,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
@@ -806,7 +806,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
@@ -816,7 +816,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
@@ -826,7 +826,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
@@ -836,7 +836,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
@@ -846,7 +846,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -856,7 +856,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -866,7 +866,7 @@
// CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -876,7 +876,7 @@
// CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmerge_vfm_f16m1 (vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmerge_vfm_f16m2 (vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmerge_vfm_f16m4 (vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmerge.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmerge_vfm_f16m8 (vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, @@ -508,7 +508,7 @@ // CHECK-RV64-LABEL: 
@test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, @@ -568,7 +568,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, @@ -578,7 +578,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, @@ -588,7 +588,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -598,7 +598,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -608,7 +608,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, @@ -668,7 +668,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, @@ -678,7 +678,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, @@ -688,7 +688,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, @@ -698,7 +698,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -728,7 +728,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, @@ -758,7 +758,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, @@ -768,7 +768,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, @@ -778,7 +778,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, @@ -788,7 +788,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmerge.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, @@ -808,7 +808,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, @@ -838,7 +838,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, @@ -858,7 +858,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -898,7 +898,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, @@ -908,7 +908,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, @@ -928,7 +928,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, @@ -948,7 +948,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, @@ -958,7 +958,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmerge_vvm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmerge_vvm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmerge_vvm_f16m1 (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmerge_vvm_f16m2 (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmerge_vvm_f16m4 (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vmerge_vvm_f16m8 (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, @@ -66,7 +66,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, @@ -76,7 +76,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, @@ -86,7 +86,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, @@ -136,7 +136,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, @@ -146,7 +146,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, @@ -156,7 +156,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, @@ -166,7 +166,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, @@ -176,7 +176,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, @@ -206,7 +206,7 @@ 
// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, @@ -226,7 +226,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, @@ -236,7 +236,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, @@ -256,7 +256,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, @@ -266,7 +266,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, @@ -316,7 +316,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, @@ -406,7 +406,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, @@ -416,7 +416,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, @@ -426,7 +426,7 @@ // CHECK-RV64-LABEL: 
@test_vsbc_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, @@ -446,7 +446,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, @@ -496,7 +496,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, @@ -506,7 +506,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, @@ -526,7 +526,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, @@ -536,7 +536,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -596,7 +596,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, @@ -606,7 +606,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -616,7 +616,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, @@ -626,7 +626,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -656,7 +656,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, @@ -686,7 +686,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -696,7 +696,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, @@ -706,7 +706,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -716,7 +716,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, @@ -746,7 +746,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, @@ -786,7 +786,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, @@ -796,7 +796,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, @@ -806,7 +806,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, @@ -826,7 +826,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, @@ -836,7 +836,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, @@ -856,7 +856,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, @@ -866,7 +866,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, @@ -876,7 +876,7 @@ // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -527,15 +527,15 @@ let VLOperand = 4; } // For binary operations with V0 as input. - // Input: (vector_in, vector_in/scalar_in, V0, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl) class RISCVBinaryWithV0 : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 3; + let SplatOperand = 2; + let VLOperand = 4; } // For binary operations with mask type output and V0 as input. // Output: (mask type output) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2353,6 +2353,13 @@ Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; defm "" : VPseudoBinaryV_IM, Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>; + // Tied versions to allow codegen control over the tail elements + defm "" : VPseudoTiedBinaryV_VM, + Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_XM, + Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_IM, + Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>; } multiclass VPseudoVCALU_VM_XM { @@ -2360,6 +2367,11 @@ Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>; defm "" : VPseudoBinaryV_XM, Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; + // Tied versions to allow codegen control over the tail elements + defm "" : VPseudoTiedBinaryV_VM, + Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_XM, + Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>; } multiclass VPseudoVCALUM_VM_XM_IM { @@ -3383,6 +3395,42 @@ op2_kind>; } +multiclass VPatBinaryCarryInTAIL<string intrinsic, + string inst, + string kind, + ValueType result_type, + ValueType op1_type, + ValueType op2_type, + ValueType mask_type, + int sew, + LMULInfo vlmul, + VReg result_reg_class, + VReg op1_reg_class, + DAGOperand op2_kind> +{ + def : Pat<(result_type (!cast<Intrinsic>(intrinsic) + (result_type undef), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + VLOpFrag)), + (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew)>; + def : Pat<(result_type (!cast<Intrinsic>(intrinsic) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + VLOpFrag)), + (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew)>; +} + multiclass VPatBinaryCarryIn; } +multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction, + bit CarryOut = 0, + list<VTypeInfo> vtilist = AllIntegerVectors> { + foreach vti
= vtilist in + defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM", + !if(CarryOut, vti.Mask, vti.Vector), + vti.Vector, vti.Vector, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.RegClass>; +} + +multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction, + bit CarryOut = 0, + list<VTypeInfo> vtilist = AllIntegerVectors> { + foreach vti = vtilist in + defm : VPatBinaryCarryInTAIL<intrinsic, instruction, + "V"#vti.ScalarSuffix#"M", + !if(CarryOut, vti.Mask, vti.Vector), + vti.Vector, vti.Scalar, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.ScalarRegClass>; +} + +multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction> { + foreach vti = AllIntegerVectors in + defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM", + vti.Vector, vti.Vector, XLenVT, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, simm5>; +} + multiclass VPatBinaryV_V { foreach vti = AllIntegerVectors in defm : VPatBinaryMaskOut; multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction> - : VPatBinaryV_VM<intrinsic, instruction>, - VPatBinaryV_XM<intrinsic, instruction>, - VPatBinaryV_IM<intrinsic, instruction>; + : VPatBinaryV_VM_TAIL<intrinsic, instruction>, + VPatBinaryV_XM_TAIL<intrinsic, instruction>, + VPatBinaryV_IM_TAIL<intrinsic, instruction>; multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction> : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>, @@ -3782,8 +3863,8 @@ VPatBinaryV_I; multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction> - : VPatBinaryV_VM<intrinsic, instruction>, - VPatBinaryV_XM<intrinsic, instruction>; + : VPatBinaryV_VM_TAIL<intrinsic, instruction>, + VPatBinaryV_XM_TAIL<intrinsic, instruction>; multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction> : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>, @@ -5134,19 +5215,27 @@ // We can use vmerge.vvm to support vector-vector vfmerge. // NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses // int_riscv_vmerge. Support both for compatibility. -defm : VPatBinaryV_VM<"int_riscv_vmerge", "PseudoVMERGE", - /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; -defm : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE", - /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; -defm : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE", - /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; +defm : VPatBinaryV_VM_TAIL<"int_riscv_vmerge", "PseudoVMERGE", + /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; +defm : VPatBinaryV_VM_TAIL<"int_riscv_vfmerge", "PseudoVMERGE", + /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; +defm : VPatBinaryV_XM_TAIL<"int_riscv_vfmerge", "PseudoVFMERGE", + /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; foreach fvti = AllFloatVectors in { defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX); def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector undef), + (fvti.Vector fvti.RegClass:$rs2), (fvti.Scalar (fpimm0)), (fvti.Mask V0), VLOpFrag)), (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; + defvar instr_tu = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU"); + def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge), + (fvti.Vector fvti.RegClass:$rs2), + (fvti.Scalar (fpimm0)), + (fvti.Mask V0), VLOpFrag)), + (instr_tu fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, + (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; } //===----------------------------------------------------------------------===//
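A minimal sketch of what the two VPatBinaryCarryInTAIL patterns above are intended to match (illustrative IR, not taken from the patch; the value names %op1, %op2, %carryin, %passthru, and %vl are assumptions): an undef passthru keeps selecting the original tail-agnostic pseudo, while a real passthru selects the new tail-undisturbed _TU pseudo.
  %ta = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, <vscale x 1 x i1> %carryin, iXLen %vl) ; -> PseudoVADC_VVM_MF8
  %tu = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, <vscale x 1 x i1> %carryin, iXLen %vl) ; -> PseudoVADC_VVM_MF8_TU
The unmasked-tu.ll tests that follow pass a real merge operand, which is why their CHECK lines expect a "tu, mu" vsetvli policy rather than a tail-agnostic one.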
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -2984,3 +2984,254 @@ ret %a } + +declare @llvm.riscv.vadc.nxv1i8.nxv1i8( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vadc.vvm v8, v9, v10, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vadc.vvm v8, v9, v10, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vadc.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsbc.vvm v8, v9, v10, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsbc.vvm v8, v9, v10, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsbc.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( + , + , + , + , + iXLen); + +define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmerge.vvm v8, v9, v10, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmerge.vvm v8, v9, v10, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmerge.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmerge.nxv8i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV64-NEXT: vmerge.vxm v8, v16, a0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmerge.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4) + + ret %a +} + +define @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: li a1, 15 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: li a1, -1 +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: li a1, -1 +; RV64-NEXT: srli a1, a1, 28 +; RV64-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; RV64-NEXT: vmerge.vxm v8, v16, a1, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmerge.nxv8i64.i64( + %0, + %1, + i64 68719476735, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8f64.f64( + , + , + double, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; RV32-NEXT: vfmerge.vfm v8, v16, fa0, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; RV64-NEXT: vfmerge.vfm v8, v16, fa0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv8f64.f64( +
%0, + %1, + double %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + , + , + , + , + iXLen); + +define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vmerge.vvm v8, v9, v10, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vmerge.vvm v8, v9, v10, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv1f16.f16( + , + , + half, + , + iXLen); + +define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vmerge.vim v8, v9, 0, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vmerge.vim v8, v9, 0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1f16.f16( + %0, + %1, + half zeroinitializer, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vadc.nxv1i8.nxv1i8( + , , , , @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv1i8.nxv1i8( + undef, %0, %1, %2, @@ -24,6 +26,7 @@ } declare @llvm.riscv.vadc.nxv2i8.nxv2i8( + , , , , @@ -37,6 +40,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv2i8.nxv2i8( + undef, %0, %1, %2, @@ -46,6 +50,7 @@ } declare @llvm.riscv.vadc.nxv4i8.nxv4i8( + , , , , @@ -59,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv4i8.nxv4i8( + undef, %0, %1, %2, @@ -68,6 +74,7 @@ } declare @llvm.riscv.vadc.nxv8i8.nxv8i8( + , , , , @@ -81,6 +88,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv8i8.nxv8i8( + undef, %0, %1, %2, @@ -90,6 +98,7 @@ } declare @llvm.riscv.vadc.nxv16i8.nxv16i8( + , , , , @@ -103,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv16i8.nxv16i8( + undef, %0, %1, %2, @@ -112,6 +122,7 @@ } declare @llvm.riscv.vadc.nxv32i8.nxv32i8( + , , , , @@ -125,6 +136,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv32i8.nxv32i8( + undef, %0, %1, %2, @@ -134,6 +146,7 @@ } declare @llvm.riscv.vadc.nxv64i8.nxv64i8( + , , , , @@ -147,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv64i8.nxv64i8( + undef, %0, %1, %2, @@ -156,6 +170,7 @@ } declare @llvm.riscv.vadc.nxv1i16.nxv1i16( + , , , , @@ -169,6 +184,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv1i16.nxv1i16( + undef, %0, %1, %2, @@ -178,6 +194,7 @@ } declare @llvm.riscv.vadc.nxv2i16.nxv2i16( + , , , , @@ -191,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv2i16.nxv2i16( + undef, %0, %1, %2, @@ -200,6 +218,7 @@ } declare @llvm.riscv.vadc.nxv4i16.nxv4i16( + , , , , @@ -213,6 +232,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadc.nxv4i16.nxv4i16( + undef, %0, %1, %2, @@ -222,6 +242,7 @@ } declare @llvm.riscv.vadc.nxv8i16.nxv8i16( + , , , , @@ 
-235,6 +256,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -244,6 +266,7 @@
 }

 declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
[hunks @@ -257,6 +280,7 @@ through @@ -486,6 +530,7 @@: the same pair of changes (one passthru type line added to each declare, one leading undef argument added to each call) for the remaining vector-vector forms vadc.nxv16i16.nxv16i16 .. vadc.nxv8i64.nxv8i64]
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
@@ -499,6 +544,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
[hunks @@ -508,6 +554,7 @@ through @@ -985,6 +1072,7 @@: the same pattern for the remaining vector-scalar forms vadc.nxv2i8.i8 .. vadc.nxv64i8.i8, vadc.nxv1i16.i16 .. vadc.nxv32i16.i16, vadc.nxv1i32.i32 .. vadc.nxv16i32.i32, and vadc.nxv1i64.i64 .. vadc.nxv8i64.i64]
@@ -1001,6 +1089,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 -9,
     <vscale x 1 x i1> %1,
[hunks @@ -1017,6 +1106,7 @@ through @@ -1337,6 +1446,7 @@: the same undef passthru added to the remaining immediate-operand tests (constants 9 and -9, i8 through i64, up to vadc.nxv8i64.i64)]
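; Illustrative only (not part of the patch): a minimal standalone module
; showing the vadc intrinsic shape after this change on RV32, with the new
; leading passthru operand passed as undef, exactly as in the regenerated
; tests above. The function name @example_vadc_vvm is hypothetical.
declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32)

define <vscale x 2 x i32> @example_vadc_vvm(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %op2, <vscale x 2 x i1> %carryin, i32 %vl) {
entry:
  ; operand order after this patch: passthru, op1, op2, carry-in mask, vl
  %r = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %op1,
    <vscale x 2 x i32> %op2,
    <vscale x 2 x i1> %carryin,
    i32 %vl)
  ret <vscale x 2 x i32> %r
}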
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
[hunks @@ -24,6 +26,7 @@ through @@ -486,6 +530,7 @@: the same declare/call pair of changes for the remaining vector-vector forms vadc.nxv2i8.nxv2i8 .. vadc.nxv8i64.nxv8i64]
[hunks @@ -499,6 +544,7 @@ through @@ -961,6 +1048,7 @@: the same pattern for the vector-scalar forms vadc.nxv1i8.i8 .. vadc.nxv8i64.i64]
[hunks @@ -977,6 +1065,7 @@ through @@ -1313,6 +1422,7 @@: the same undef passthru added to the immediate-operand tests (constants 9 and -9, i8 through i64)]
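; Illustrative only (not part of the patch): the vector-scalar (vxm) form on
; RV64 after this change; @example_vadc_vxm is a hypothetical name.
declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64)

define <vscale x 1 x i64> @example_vadc_vxm(<vscale x 1 x i64> %op1, i64 %op2, <vscale x 1 x i1> %carryin, i64 %vl) {
entry:
  ; the new passthru slot comes first; the scalar addend stays in slot three
  %r = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %op1,
    i64 %op2,
    <vscale x 1 x i1> %carryin,
    i64 %vl)
  ret <vscale x 1 x i64> %r
}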
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
@@ -17,6 +18,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
[hunks @@ -26,6 +28,7 @@ through @@ -655,6 +714,7 @@: the same declare/call changes for the remaining vfmerge variants, both vector-vector and vector-scalar (.f16/.f32/.f64), from nxv1f16 up to nxv8f64]
@@ -671,6 +731,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half zeroinitializer,
     <vscale x 1 x i1> %1,
[hunks @@ -687,6 +748,7 @@ through @@ -895,6 +969,7 @@: the same undef passthru added to the remaining zeroinitializer tests, half/float/double, nxv1f16 .. nxv8f64]
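; Illustrative only (not part of the patch): the vfmerge vfm form with the
; new leading passthru. float is used here instead of the half type of the
; tests above, to sidestep the +zfh requirement; @example_vfmerge_vfm is a
; hypothetical name.
declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64)

define <vscale x 2 x float> @example_vfmerge_vfm(<vscale x 2 x float> %op1, float %scalar, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; selects %scalar where the mask is set, %op1 elsewhere; passthru is undef
  %r = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %op1,
    float %scalar,
    <vscale x 2 x i1> %mask,
    i64 %vl)
  ret <vscale x 2 x float> %r
}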
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
[hunks @@ -24,6 +26,7 @@ through @@ -486,6 +530,7 @@: the same declare/call changes for the remaining integer vector-vector forms vmerge.nxv2i8.nxv2i8 .. vmerge.nxv8i64.nxv8i64]
[hunks @@ -499,6 +544,7 @@ through @@ -985,6 +1072,7 @@: the same pattern for the vector-scalar forms vmerge.nxv1i8.i8 .. vmerge.nxv8i64.i64]
[hunks @@ -1001,6 +1089,7 @@ through @@ -1337,6 +1446,7 @@: the same undef passthru added to the immediate tests (constant 9, i8 through i64)]
[hunks @@ -1346,6 +1456,7 @@ through @@ -1667,6 +1806,7 @@: the same declare/call changes for the floating-point forms vmerge.nxv1f16.nxv1f16 .. vmerge.nxv8f64.nxv8f64]
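; Illustrative only (not part of the patch): an integer vmerge.vvm call with
; the new leading passthru on RV32 (i32 vl); @example_vmerge_vvm is a
; hypothetical name.
declare <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32)

define <vscale x 4 x i16> @example_vmerge_vvm(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %op2, <vscale x 4 x i1> %mask, i32 %vl) {
entry:
  ; per-element select: %op2 where %mask is set, %op1 elsewhere
  %r = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %op1,
    <vscale x 4 x i16> %op2,
    <vscale x 4 x i1> %mask,
    i32 %vl)
  ret <vscale x 4 x i16> %r
}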
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
[hunks @@ -24,6 +26,7 @@ through @@ -486,6 +530,7 @@: the same declare/call changes for the remaining integer vector-vector forms vmerge.nxv2i8.nxv2i8 .. vmerge.nxv8i64.nxv8i64]
[hunks @@ -499,6 +544,7 @@ through @@ -961,6 +1048,7 @@: the same pattern for the vector-scalar forms vmerge.nxv1i8.i8 .. vmerge.nxv8i64.i64]
[hunks @@ -977,6 +1065,7 @@ through @@ -1313,6 +1422,7 @@: the same undef passthru added to the immediate tests (constant 9, i8 through i64)]
[hunks @@ -1322,6 +1432,7 @@ through @@ -1643,6 +1782,7 @@: the same declare/call changes for the floating-point forms vmerge.nxv1f16.nxv1f16 .. vmerge.nxv8f64.nxv8f64]
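; Illustrative only (not part of the patch): the floating-point vmerge.vvm
; form on RV64; @example_vmerge_f is a hypothetical name.
declare <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64)

define <vscale x 2 x double> @example_vmerge_f(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; same select semantics as the integer form, with an undef passthru first
  %r = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %op1,
    <vscale x 2 x double> %op2,
    <vscale x 2 x i1> %mask,
    i64 %vl)
  ret <vscale x 2 x double> %r
}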
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
[hunks @@ -24,6 +26,7 @@ through @@ -486,6 +530,7 @@: the same declare/call changes for the remaining vector-vector forms vsbc.nxv2i8.nxv2i8 .. vsbc.nxv8i64.nxv8i64]
[hunks @@ -499,6 +544,7 @@ through @@ -985,6 +1072,7 @@: the same pattern for the vector-scalar forms vsbc.nxv1i8.i8 .. vsbc.nxv8i64.i64; unlike vadc and vmerge, vsbc has no immediate-operand tests]
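; Illustrative only (not part of the patch): vsbc.vvm with the new leading
; passthru on RV32; the fourth operand is the borrow-in mask.
; @example_vsbc_vvm is a hypothetical name.
declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32)

define <vscale x 8 x i8> @example_vsbc_vvm(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, <vscale x 8 x i1> %borrowin, i32 %vl) {
entry:
  ; computes op1 - op2 - borrowin per element; passthru is undef as in the tests
  %r = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %op1,
    <vscale x 8 x i8> %op2,
    <vscale x 8 x i1> %borrowin,
    i32 %vl)
  ret <vscale x 8 x i8> %r
}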
@llvm.riscv.vsbc.nxv2i8.nxv2i8( + undef, %0, %1, %2, @@ -46,6 +50,7 @@ } declare @llvm.riscv.vsbc.nxv4i8.nxv4i8( + , , , , @@ -59,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i8.nxv4i8( + undef, %0, %1, %2, @@ -68,6 +74,7 @@ } declare @llvm.riscv.vsbc.nxv8i8.nxv8i8( + , , , , @@ -81,6 +88,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i8.nxv8i8( + undef, %0, %1, %2, @@ -90,6 +98,7 @@ } declare @llvm.riscv.vsbc.nxv16i8.nxv16i8( + , , , , @@ -103,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i8.nxv16i8( + undef, %0, %1, %2, @@ -112,6 +122,7 @@ } declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( + , , , , @@ -125,6 +136,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv32i8.nxv32i8( + undef, %0, %1, %2, @@ -134,6 +146,7 @@ } declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( + , , , , @@ -147,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv64i8.nxv64i8( + undef, %0, %1, %2, @@ -156,6 +170,7 @@ } declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( + , , , , @@ -169,6 +184,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i16.nxv1i16( + undef, %0, %1, %2, @@ -178,6 +194,7 @@ } declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( + , , , , @@ -191,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i16.nxv2i16( + undef, %0, %1, %2, @@ -200,6 +218,7 @@ } declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( + , , , , @@ -213,6 +232,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i16.nxv4i16( + undef, %0, %1, %2, @@ -222,6 +242,7 @@ } declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( + , , , , @@ -235,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i16.nxv8i16( + undef, %0, %1, %2, @@ -244,6 +266,7 @@ } declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( + , , , , @@ -257,6 +280,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i16.nxv16i16( + undef, %0, %1, %2, @@ -266,6 +290,7 @@ } declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( + , , , , @@ -279,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv32i16.nxv32i16( + undef, %0, %1, %2, @@ -288,6 +314,7 @@ } declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( + , , , , @@ -301,6 +328,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i32.nxv1i32( + undef, %0, %1, %2, @@ -310,6 +338,7 @@ } declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( + , , , , @@ -323,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i32.nxv2i32( + undef, %0, %1, %2, @@ -332,6 +362,7 @@ } declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( + , , , , @@ -345,6 +376,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i32.nxv4i32( + undef, %0, %1, %2, @@ -354,6 +386,7 @@ } declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( + , , , , @@ -367,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i32.nxv8i32( + undef, %0, %1, %2, @@ -376,6 +410,7 @@ } declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( + , , , , @@ -389,6 +424,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i32.nxv16i32( + undef, %0, %1, %2, @@ -398,6 +434,7 @@ } declare @llvm.riscv.vsbc.nxv1i64.nxv1i64( + , , , , @@ -411,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i64.nxv1i64( + undef, %0, %1, %2, @@ -420,6 +458,7 @@ } declare @llvm.riscv.vsbc.nxv2i64.nxv2i64( + , , , , @@ -433,6 +472,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i64.nxv2i64( + undef, %0, %1, %2, @@ -442,6 +482,7 @@ } declare @llvm.riscv.vsbc.nxv4i64.nxv4i64( + , , , , @@ -455,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i64.nxv4i64( + undef, %0, %1, %2, @@ -464,6 
+506,7 @@ } declare @llvm.riscv.vsbc.nxv8i64.nxv8i64( + , , , , @@ -477,6 +520,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i64.nxv8i64( + undef, %0, %1, %2, @@ -486,6 +530,7 @@ } declare @llvm.riscv.vsbc.nxv1i8.i8( + , , i8, , @@ -499,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i8.i8( + undef, %0, i8 %1, %2, @@ -508,6 +554,7 @@ } declare @llvm.riscv.vsbc.nxv2i8.i8( + , , i8, , @@ -521,6 +568,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i8.i8( + undef, %0, i8 %1, %2, @@ -530,6 +578,7 @@ } declare @llvm.riscv.vsbc.nxv4i8.i8( + , , i8, , @@ -543,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i8.i8( + undef, %0, i8 %1, %2, @@ -552,6 +602,7 @@ } declare @llvm.riscv.vsbc.nxv8i8.i8( + , , i8, , @@ -565,6 +616,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i8.i8( + undef, %0, i8 %1, %2, @@ -574,6 +626,7 @@ } declare @llvm.riscv.vsbc.nxv16i8.i8( + , , i8, , @@ -587,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i8.i8( + undef, %0, i8 %1, %2, @@ -596,6 +650,7 @@ } declare @llvm.riscv.vsbc.nxv32i8.i8( + , , i8, , @@ -609,6 +664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv32i8.i8( + undef, %0, i8 %1, %2, @@ -618,6 +674,7 @@ } declare @llvm.riscv.vsbc.nxv64i8.i8( + , , i8, , @@ -631,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv64i8.i8( + undef, %0, i8 %1, %2, @@ -640,6 +698,7 @@ } declare @llvm.riscv.vsbc.nxv1i16.i16( + , , i16, , @@ -653,6 +712,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i16.i16( + undef, %0, i16 %1, %2, @@ -662,6 +722,7 @@ } declare @llvm.riscv.vsbc.nxv2i16.i16( + , , i16, , @@ -675,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i16.i16( + undef, %0, i16 %1, %2, @@ -684,6 +746,7 @@ } declare @llvm.riscv.vsbc.nxv4i16.i16( + , , i16, , @@ -697,6 +760,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i16.i16( + undef, %0, i16 %1, %2, @@ -706,6 +770,7 @@ } declare @llvm.riscv.vsbc.nxv8i16.i16( + , , i16, , @@ -719,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i16.i16( + undef, %0, i16 %1, %2, @@ -728,6 +794,7 @@ } declare @llvm.riscv.vsbc.nxv16i16.i16( + , , i16, , @@ -741,6 +808,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i16.i16( + undef, %0, i16 %1, %2, @@ -750,6 +818,7 @@ } declare @llvm.riscv.vsbc.nxv32i16.i16( + , , i16, , @@ -763,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv32i16.i16( + undef, %0, i16 %1, %2, @@ -772,6 +842,7 @@ } declare @llvm.riscv.vsbc.nxv1i32.i32( + , , i32, , @@ -785,6 +856,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv1i32.i32( + undef, %0, i32 %1, %2, @@ -794,6 +866,7 @@ } declare @llvm.riscv.vsbc.nxv2i32.i32( + , , i32, , @@ -807,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i32.i32( + undef, %0, i32 %1, %2, @@ -816,6 +890,7 @@ } declare @llvm.riscv.vsbc.nxv4i32.i32( + , , i32, , @@ -829,6 +904,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i32.i32( + undef, %0, i32 %1, %2, @@ -838,6 +914,7 @@ } declare @llvm.riscv.vsbc.nxv8i32.i32( + , , i32, , @@ -851,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i32.i32( + undef, %0, i32 %1, %2, @@ -860,6 +938,7 @@ } declare @llvm.riscv.vsbc.nxv16i32.i32( + , , i32, , @@ -873,6 +952,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv16i32.i32( + undef, %0, i32 %1, %2, @@ -882,6 +962,7 @@ } declare @llvm.riscv.vsbc.nxv1i64.i64( + , , i64, , @@ -895,6 +976,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vsbc.nxv1i64.i64( + undef, %0, i64 %1, %2, @@ -904,6 +986,7 @@ } declare @llvm.riscv.vsbc.nxv2i64.i64( + , , i64, , @@ -917,6 +1000,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv2i64.i64( + undef, %0, i64 %1, %2, @@ -926,6 +1010,7 @@ } declare @llvm.riscv.vsbc.nxv4i64.i64( + , , i64, , @@ -939,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv4i64.i64( + undef, %0, i64 %1, %2, @@ -948,6 +1034,7 @@ } declare @llvm.riscv.vsbc.nxv8i64.i64( + , , i64, , @@ -961,6 +1048,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsbc.nxv8i64.i64( + undef, %0, i64 %1, %2,
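Note on the test churn above: every `llvm.riscv.vadc.*` and `llvm.riscv.vsbc.*` intrinsic gains a new leading passthru operand, and the updated tests pass `undef` for it, preserving the pre-patch semantics. The following standalone IR sketch (not part of the patch; the function and value names are invented for illustration, and it assumes a toolchain that includes this change) shows the post-patch shape of one instantiation end to end:

; Run with: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < example.ll
declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (new operand added by this patch)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  <vscale x 1 x i1>,   ; borrow-in mask
  i64)                 ; vl

define <vscale x 1 x i8> @sbc_example(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y,
                                      <vscale x 1 x i1> %borrow, i64 %vl) {
entry:
  ; An undef passthru matches what the updated tests check for: no merge
  ; value is carried into the result, as before the patch.
  %r = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
      <vscale x 1 x i8> undef,
      <vscale x 1 x i8> %x,
      <vscale x 1 x i8> %y,
      <vscale x 1 x i1> %borrow,
      i64 %vl)
  ret <vscale x 1 x i8> %r
}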