diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1616,6 +1616,7 @@
                                      [["w", "wv"]]>;
 
 // 12.3. Vector Integer Extension
+let HasNoMaskPassThru = true in {
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
   def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1628,6 +1629,7 @@
   def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
   def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
 }
+}
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 let HasMask = false, HasPolicy = false in {
@@ -1833,6 +1835,7 @@
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
 
 // 14.8. Vector Floating-Point Square-Root Instruction
+let HasNoMaskPassThru = true in {
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
 
 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -1842,7 +1845,6 @@
 def vfrec7 : RVVFloatingUnaryVVBuiltin;
 
 // 14.11. Vector Floating-Point MIN/MAX Instructions
-let HasNoMaskPassThru = true in {
 defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
@@ -1865,7 +1867,7 @@
 }
 
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", HasPolicy = false in
+let Name = "vfclass_v", HasNoMaskPassThru = true in
   def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
 
 // 14.15. Vector Floating-Point Merge Instruction
@@ -1887,6 +1889,7 @@
                                      [["f", "v", "ve"]]>;
 
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+let HasNoMaskPassThru = true in {
 def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
 def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
 def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
@@ -1916,6 +1919,7 @@
 def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
 def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
 }
+}
 
 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
@@ -1981,7 +1985,7 @@
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
 
-let HasNoMaskedOverloaded = false in {
+let HasNoMaskPassThru = true, HasNoMaskedOverloaded = false in {
   // 16.8.
Vector Iota Instruction defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( 
undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: 
@test_vfncvt_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -277,7 +277,7 @@ // 
CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { @@ -349,7 +349,7 
@@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: 
@test_vfncvt_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrec7.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( 
[[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { @@ -168,7 
+168,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( undef, 
[[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t 
maskedoff, @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -139,7 +139,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -149,7 +149,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f16.i64( [[OP1:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { @@ -1036,7 +1036,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { @@ -1045,7 +1045,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { @@ -1063,7 +1063,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { @@ -1072,7 +1072,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { @@ -1081,7 +1081,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { @@ -1090,7 +1090,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { @@ -1099,7 +1099,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { @@ -1108,7 +1108,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { @@ -1117,7 +1117,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { @@ -1126,7 +1126,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { @@ -1135,7 +1135,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { @@ -1144,7 +1144,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { @@ -1153,7 +1153,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { @@ -1162,7 +1162,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { @@ -1171,7 +1171,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { @@ -1180,7 +1180,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { @@ -1189,7 +1189,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { @@ -1198,7 +1198,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { @@ -1207,7 +1207,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { @@ -1216,7 +1216,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { @@ -1225,7 +1225,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { @@ -1243,7 +1243,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { @@ -1252,7 +1252,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) { @@ -1261,7 +1261,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) { @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) { @@ -1279,7 +1279,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) { @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) { @@ -1297,7 +1297,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) { @@ -1306,7 +1306,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) { @@ -1315,7 +1315,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) { @@ -1324,7 +1324,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) { @@ -1333,7 +1333,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) { @@ -1342,7 +1342,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) { @@ -1351,7 +1351,7 @@ // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: 
@test_vfncvt_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: 
@test_vfncvt_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) 
{ @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t 
src, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { @@ -1012,7 +1012,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { @@ -1021,7 +1021,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { @@ -1030,7 +1030,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { @@ -1057,7 +1057,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { @@ -1066,7 +1066,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { @@ -1075,7 +1075,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { @@ -1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { @@ -1093,7 +1093,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { @@ -1102,7 +1102,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { @@ -1111,7 +1111,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { @@ -1120,7 +1120,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { @@ -1138,7 +1138,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { @@ -1147,7 +1147,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { @@ -1156,7 +1156,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { @@ -1165,7 +1165,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { @@ -1174,7 +1174,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { @@ -1183,7 +1183,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { @@ -1201,7 +1201,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { @@ -1210,7 +1210,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { @@ -1228,7 +1228,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) { @@ -1237,7 +1237,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) { @@ -1246,7 +1246,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) { @@ -1255,7 +1255,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) { @@ -1264,7 +1264,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) { @@ -1273,7 +1273,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) { @@ -1282,7 +1282,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) { @@ -1291,7 +1291,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) { @@ -1300,7 +1300,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) { @@ -1309,7 +1309,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) { @@ -1318,7 +1318,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { @@ -1327,7 +1327,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { @@ -1336,7 +1336,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { @@ -1345,7 +1345,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { @@ -1354,7 +1354,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { @@ -1363,7 +1363,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { @@ -1372,7 +1372,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { @@ -1381,7 +1381,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { @@ -1390,7 +1390,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // 
CHECK-RV64-LABEL: @test_vfrec7_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4 
(vfloat16mf4_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: 
@test_vfrsqrt7_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) 
{ @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( 
undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c @@ -6,154 +6,154 @@ // CHECK-RV64-LABEL: @test_vid_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1(size_t vl) { return 
vid_v_u8m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv32i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv64i8.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv32i16.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i32.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i32.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i32.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i32.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv16i32.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv1i64.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv2i64.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv4i64.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.nxv8i64.i64( undef, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv64i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv32i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv2i64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_viota_m_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { @@ -213,7 +213,7 
@@ // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: 
@test_vzext_vf2_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( undef, 
[[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -350,12 +350,12 @@ let VLOperand = 4; } // For destination vector type is the same as source vector. 
- // Input: (vector_in, vl) + // Input: (passthru, vector_in, vl) class RISCVUnaryAANoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } // For destination vector type is the same as first source vector (with mask). // Input: (vector_in, mask, vl, ta) @@ -367,7 +367,7 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } - class RISCVUnaryAAMaskNoTA + class RISCVUnaryAAMaskTU : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], @@ -583,21 +583,23 @@ } // For FP classify operations. // Output: (bit mask type output) - // Input: (vector_in, vl) + // Input: (passthru, vector_in, vl) class RISCVClassifyNoMask : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], - [llvm_anyvector_ty, llvm_anyint_ty], + [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 1; } // For FP classify operations with mask. // Output: (bit mask type output) - // Input: (maskedoff, vector_in, mask, vl) + // Input: (maskedoff, vector_in, mask, vl, policy) class RISCVClassifyMask : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty, LLVMMatchType<1>], + [IntrNoMem, ImmArg>]>, RISCVVIntrinsic { let VLOperand = 3; } // For Saturating binary operations. @@ -760,12 +762,12 @@ let VLOperand = 2; } // For destination vector type is NOT the same as source vector. - // Input: (vector_in, vl) + // Input: (passthru, vector_in, vl) class RISCVUnaryABNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } // For destination vector type is NOT the same as source vector (with mask). // Input: (maskedoff, vector_in, mask, vl, ta) @@ -800,17 +802,24 @@ // Input: (vl) class RISCVNullaryIntrinsic : Intrinsic<[llvm_anyvector_ty], - [llvm_anyint_ty], + [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } + // Output: (vector) + // Input: (passthru, vl) + class RISCVNullaryIntrinsicTU + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 0; + let VLOperand = 1; } // For Conversion unary operations. - // Input: (vector_in, vl) + // Input: (passthru, vector_in, vl) class RISCVConversionNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } // For Conversion unary operations with mask. 
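For reference, a minimal LLVM IR sketch of the updated unmasked signature (the vfsqrt overload and the i64 XLEN type here are illustrative choices, not lines taken from the patch): the merge/passthru value becomes operand 0 and VL stays last, and a frontend that wants the previous tail-agnostic behaviour simply passes undef there, which is exactly what the updated clang CHECK lines above expect.

; Illustrative sketch only: an unmasked RISCVUnaryAANoMask-style intrinsic
; after this change, with the new passthru as the first operand.
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(
  <vscale x 1 x half>,   ; passthru / merge (new operand 0)
  <vscale x 1 x half>,   ; vector_in
  i64)                   ; vl

define <vscale x 1 x half> @sqrt_ta(<vscale x 1 x half> %v, i64 %vl) {
  ; undef passthru keeps the old tail-agnostic semantics
  %r = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(
      <vscale x 1 x half> undef,
      <vscale x 1 x half> %v,
      i64 %vl)
  ret <vscale x 1 x half> %r
}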
// Input: (maskedoff, vector_in, mask, vl, ta) @@ -1268,7 +1277,7 @@ let VLOperand = 1; } def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty], - [LLVMVectorElementType<0>, llvm_anyint_ty], + [LLVMVectorElementType<0>,llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 1; } @@ -1340,7 +1349,7 @@ defm vrgather_vx : RISCVRGatherVX; defm vrgatherei16_vv : RISCVRGatherEI16VV; - def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA; + def "int_riscv_vcompress" : RISCVUnaryAAMaskTU; defm vaaddu : RISCVSaturatingBinaryAAX; defm vaadd : RISCVSaturatingBinaryAAX; @@ -1424,12 +1433,13 @@ defm vfncvt_rod_f_f_w : RISCVConversion; // Output: (vector) - // Input: (mask type input, vl) + // Input: (passthru, mask type input, vl) def int_riscv_viota : Intrinsic<[llvm_anyvector_ty], - [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + [LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } // Output: (vector) // Input: (maskedoff, mask type vector_in, mask, vl) @@ -1442,8 +1452,8 @@ let VLOperand = 3; } // Output: (vector) - // Input: (vl) - def int_riscv_vid : RISCVNullaryIntrinsic; + // Input: (passthru, vl) + def int_riscv_vid : RISCVNullaryIntrinsicTU; // Output: (vector) // Input: (maskedoff, mask, vl) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -872,6 +872,21 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoNullaryNoMaskTU: + Pseudo<(outs RegClass:$rd), + (ins RegClass:$merge, AVL:$vl, ixlenimm:$sew), + []>, RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = "$rd = $merge"; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoNullaryMask: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, VMaskOp:$vm, AVL:$vl, @@ -916,6 +931,22 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +// RetClass could be GPR or VReg. 
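Conversely, when the passthru operand is a live value rather than undef, the VPatUnaryNoMaskTU patterns added a little further down select the new _TU pseudo, whose "$rd = $merge" constraint ties the result to the merge register so tail elements are preserved. A rough sketch of the IR shape the new unmasked-tu.ll tests exercise, again with an illustrative vfsqrt overload:

; Illustrative sketch only.
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(
  <vscale x 1 x half>, <vscale x 1 x half>, i64)

define <vscale x 1 x half> @sqrt_tu(<vscale x 1 x half> %merge,
                                    <vscale x 1 x half> %v, i64 %vl) {
  ; A non-undef passthru is expected to select the _TU pseudo and emit a
  ; tail-undisturbed vsetvli (e.g. "vsetvli zero, a0, e16, mf4, tu, mu"),
  ; with the destination register reusing %merge.
  %r = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(
      <vscale x 1 x half> %merge,
      <vscale x 1 x half> %v,
      i64 %vl)
  ret <vscale x 1 x half> %r
}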
+class VPseudoUnaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoUnaryMask : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, OpClass:$rs2, @@ -1647,6 +1678,8 @@ let VLMul = m.value in { def "_V_" # m.MX : VPseudoNullaryNoMask, Sched<[WriteVMIdxV, ReadVMask]>; + def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU, + Sched<[WriteVMIdxV, ReadVMask]>; def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask, Sched<[WriteVMIdxV, ReadVMask]>; } @@ -1667,6 +1700,8 @@ let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; + def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU, + Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; def "_" # m.MX # "_MASK" : VPseudoUnaryMask, Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>; } @@ -1986,7 +2021,9 @@ let VLMul = m.value in { def "_V_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>; - def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask, + def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>; + def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>; } } @@ -1997,6 +2034,8 @@ let VLMul = m.value in { def "_V_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>; + def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>; def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>; } @@ -2008,6 +2047,8 @@ let VLMul = m.value in { def "_V_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>; + def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>; def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>; } @@ -2021,6 +2062,8 @@ let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; + def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; @@ -2035,6 +2078,8 @@ let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; + def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; @@ -2049,6 +2094,8 @@ let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; + def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU, + Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, Sched<[WriteVExtV, ReadVExtV, ReadVMask]>; @@ -2572,6 +2619,7 @@ string Constraint = ""> { let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoUnaryNoMask; + def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU; def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA; } @@ -2773,12 +2821,31 @@ LMULInfo vlmul, VReg op2_reg_class> : Pat<(result_type (!cast(intrinsic_name) + (result_type undef), (op2_type op2_reg_class:$rs2), VLOpFrag)), (!cast(inst#"_"#kind#"_"#vlmul.MX) (op2_type op2_reg_class:$rs2), 
GPR:$vl, sew)>; +class VPatUnaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + VLOpFrag)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_TU") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew)>; + class VPatUnaryMask; + def : VPatUnaryNoMaskTU; def : VPatUnaryMask; } @@ -3204,6 +3273,9 @@ def : VPatUnaryNoMask; + def : VPatUnaryNoMaskTU; def : VPatUnaryMaskTA; @@ -3216,6 +3288,9 @@ def : VPatUnaryNoMask; + def : VPatUnaryNoMaskTU; def : VPatUnaryMaskTA; @@ -3226,9 +3301,15 @@ { foreach vti = AllIntegerVectors in { def : Pat<(vti.Vector (!cast(intrinsic) + (vti.Vector undef), VLOpFrag)), (!cast(instruction#"_V_" # vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (!cast(intrinsic) + (vti.Vector vti.RegClass:$merge), + VLOpFrag)), + (!cast(instruction#"_V_" # vti.LMul.MX # "_TU") + vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (!cast(intrinsic # "_mask") (vti.Vector vti.RegClass:$merge), (vti.Mask V0), VLOpFrag)), @@ -3376,6 +3457,8 @@ { def : VPatUnaryNoMask; + def : VPatUnaryNoMaskTU; def : VPatUnaryMaskTA; } @@ -3905,19 +3988,6 @@ } } -multiclass VPatClassifyVI_VF -{ - foreach fvti = AllFloatVectors in - { - defvar ivti = GetIntVTypeInfo.Vti; - - defm : VPatConversion; - } -} - multiclass VPatConversionVI_VF { @@ -5056,7 +5126,7 @@ //===----------------------------------------------------------------------===// // 14.14. Vector Floating-Point Classify Instruction //===----------------------------------------------------------------------===// -defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; +defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; //===----------------------------------------------------------------------===// // 14.15. 
Vector Floating-Point Merge Instruction diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -2203,3 +2203,784 @@ ret %a } + +declare @llvm.riscv.vsext.nxv1i64.nxv1i8( + , + , + iXLen); + +define @intrinsic_vsext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vsext_vf8_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; RV32-NEXT: vsext.vf8 v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsext_vf8_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; RV64-NEXT: vsext.vf8 v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vzext.nxv1i64.nxv1i8( + , + , + iXLen); + +define @intrinsic_vzext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vzext_vf8_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; RV32-NEXT: vzext.vf8 v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vzext_vf8_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; RV64-NEXT: vzext.vf8 v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; RV32-NEXT: vfncvt.x.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; RV64-NEXT: vfncvt.x.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i8( + , + iXLen); + +define @intrinsic_vid_v_nxv1i8( %0, iXLen %1) nounwind { +; RV32-LABEL: intrinsic_vid_v_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vid.v v8 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vid_v_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vid.v v8 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vid.nxv1i8( + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv1i16_nxv1f16( +; RV32-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfclass.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfclass.v v8, v9 +; RV64-NEXT: ret + %0, + %1, + iXLen %2) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.f.x.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, 
mf4, tu, mu +; RV64-NEXT: vfcvt.f.x.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.f.xu.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfcvt.f.xu.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.rtz.x.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfcvt.rtz.x.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.x.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfcvt.x.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfncvt.f.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfncvt.f.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: 
vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfcvt.xu.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfcvt.xu.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfncvt.f.x.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfncvt.f.x.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfncvt.f.xu.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfncvt.f.xu.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfncvt.rod.f.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfncvt.rod.f.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfncvt.rtz.x.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfncvt.rtz.x.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfncvt.rtz.xu.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfncvt.rtz.xu.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a 
+} + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfncvt.x.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfncvt.x.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfncvt.xu.f.w v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfncvt.xu.f.w v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrec7.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrec7.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrsqrt7.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrsqrt7.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsqrt.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsqrt.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsqrt.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwcvt.f.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwcvt.f.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + %0, 
+ %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfwcvt.f.x.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfwcvt.f.x.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vfwcvt.f.xu.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vfwcvt.f.xu.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwcvt.x.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwcvt.x.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwcvt.xu.f.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwcvt.xu.f.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i8( + , + , + iXLen); + +define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: viota.m v8, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: viota.m v8, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.viota.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfclass.nxv1i16( + , , iXLen); @@ -17,6 +18,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i16( + undef, %0, iXLen %1) @@ -27,7 +29,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: @@ -44,12 +46,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv2i16( + , , iXLen); @@ -63,6 +66,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i16( + undef, %0, iXLen %1) @@ -73,7 +77,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: @@ -90,12 +94,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv4i16( + , , iXLen); @@ -109,6 +114,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i16( + undef, %0, iXLen %1) @@ -119,7 +125,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: @@ -136,12 +142,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv8i16( + , , iXLen); @@ -155,6 +162,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i16( + undef, %0, iXLen %1) @@ -165,7 +173,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: @@ -182,12 +190,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv16i16( + , , iXLen); @@ -201,6 +210,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i16( + undef, %0, iXLen %1) @@ -211,7 +221,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: @@ -228,12 +238,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv32i16( + , , iXLen); @@ -247,6 +258,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv32i16( + undef, %0, iXLen %1) @@ -257,7 +269,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: @@ -274,12 +286,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare 
@llvm.riscv.vfclass.nxv1i32( + , , iXLen); @@ -293,6 +306,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i32( + undef, %0, iXLen %1) @@ -303,7 +317,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: @@ -320,12 +334,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv2i32( + , , iXLen); @@ -339,6 +354,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i32( + undef, %0, iXLen %1) @@ -349,7 +365,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: @@ -366,12 +382,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv4i32( + , , iXLen); @@ -385,6 +402,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i32( + undef, %0, iXLen %1) @@ -395,7 +413,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: @@ -412,12 +430,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv8i32( + , , iXLen); @@ -431,6 +450,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i32( + undef, %0, iXLen %1) @@ -441,7 +461,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: @@ -458,12 +478,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv16i32( + , , iXLen); @@ -477,6 +498,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i32( + undef, %0, iXLen %1) @@ -487,7 +509,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: @@ -504,12 +526,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv1i64( + , , iXLen); @@ -523,6 +546,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i64( + undef, %0, iXLen %1) @@ -533,7 +557,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: @@ -550,12 +574,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv2i64( + , , iXLen); @@ -569,6 +594,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i64( + undef, %0, iXLen %1) @@ -579,7 +605,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: @@ -596,12 +622,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv4i64( + , , iXLen); @@ -615,6 +642,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i64( + undef, %0, iXLen %1) @@ -625,7 +653,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: @@ -642,12 +670,13 @@ %0, %1, %2, - iXLen %3) + iXLen %3, iXLen 0) ret %a } declare @llvm.riscv.vfclass.nxv8i64( + , , iXLen); @@ -661,6 +690,7 @@ iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i64( + undef, %0, iXLen %1) @@ -671,7 +701,7 @@ , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: @@ -688,7 +718,7 @@ %0, %1, %2, - iXLen %3) + iXLen %3, 
iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh 
\ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs 
-target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | 
FileCheck %s declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( + , , 
iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ 
} declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + undef, %0, 
iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( + undef, %0, iXLen %1) @@ 
-424,6 +444,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( + , , iXLen); @@ -436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( + undef, %0, iXLen %1) @@ -466,6 +488,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + undef, %0, 
iXLen %1) @@ -424,6 +444,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + , , iXLen); @@ -436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + undef, %0, iXLen %1) @@ -466,6 +488,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + undef, %0, iXLen %1) @@ -424,6 +444,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + , , iXLen); @@ 
-436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + undef, %0, iXLen %1) @@ -466,6 +488,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + undef, %0, iXLen %1) @@ -424,6 +444,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + , , iXLen); @@ -436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + undef, %0, iXLen %1) @@ 
-466,6 +488,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrec7.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfrec7.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfrec7.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfrec7.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfrec7.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfrec7.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfrec7.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfrec7.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfrec7.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfrec7.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfrec7.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfrec7.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfrec7.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfrec7.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret 
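As a hedged illustration of why the operand exists at all (assuming the usual RVV merge/tail semantics and the elided <vscale x 1 x half> types): passing a real vector instead of undef gives the intrinsic a value to leave in the lanes it does not compute, whereas the undef used throughout these tests says the caller does not care about those lanes.

declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
  <vscale x 1 x half>,  ; passthrough
  <vscale x 1 x half>,  ; source operand
  i64)                  ; vl

define <vscale x 1 x half> @rec7_keep_dest(<vscale x 1 x half> %dest,
                                           <vscale x 1 x half> %src, i64 %vl) {
entry:
  ; %dest feeds the passthrough, so untouched lanes of the result are taken
  ; from %dest rather than being left unspecified.
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
      <vscale x 1 x half> %dest, <vscale x 1 x half> %src, i64 %vl)
  ret <vscale x 1 x half> %a
}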
entry: %a = call @llvm.riscv.vfrec7.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfrec7.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrec7.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsqrt7.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfrsqrt7.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsqrt7.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; 
RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsqrt.nxv1f16( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv1f16( + undef, %0, iXLen %1) @@ -45,6 +47,7 @@ } declare @llvm.riscv.vfsqrt.nxv2f16( + , , iXLen); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv2f16( + undef, %0, iXLen %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vfsqrt.nxv4f16( + , , iXLen); @@ -97,6 +102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv4f16( + undef, %0, iXLen %1) @@ -127,6 +133,7 @@ } declare @llvm.riscv.vfsqrt.nxv8f16( + , , iXLen); @@ -138,6 +145,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv8f16( + undef, %0, iXLen %1) @@ -168,6 +176,7 @@ } declare @llvm.riscv.vfsqrt.nxv16f16( + , , iXLen); @@ -179,6 +188,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv16f16( + undef, %0, iXLen %1) @@ -209,6 +219,7 @@ } declare @llvm.riscv.vfsqrt.nxv32f16( + , , iXLen); @@ -220,6 +231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv32f16( + undef, %0, iXLen %1) @@ -250,6 +262,7 @@ } declare @llvm.riscv.vfsqrt.nxv1f32( + , , iXLen); @@ -261,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv1f32( + undef, %0, iXLen %1) @@ -291,6 +305,7 @@ } declare @llvm.riscv.vfsqrt.nxv2f32( + , , iXLen); @@ -302,6 +317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv2f32( + undef, %0, iXLen %1) @@ -332,6 +348,7 @@ } declare @llvm.riscv.vfsqrt.nxv4f32( + , , iXLen); @@ -343,6 +360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv4f32( + undef, %0, iXLen %1) @@ -373,6 +391,7 @@ } declare @llvm.riscv.vfsqrt.nxv8f32( + , , iXLen); @@ -384,6 +403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv8f32( + undef, %0, iXLen %1) @@ -414,6 +434,7 @@ } declare @llvm.riscv.vfsqrt.nxv16f32( + , , iXLen); @@ -425,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv16f32( + undef, %0, iXLen %1) @@ -455,6 +477,7 @@ } declare @llvm.riscv.vfsqrt.nxv1f64( + , , iXLen); @@ -466,6 +489,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv1f64( + undef, %0, iXLen %1) @@ -496,6 +520,7 @@ } declare @llvm.riscv.vfsqrt.nxv2f64( + , , iXLen); @@ -507,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv2f64( + undef, %0, iXLen %1) @@ -537,6 +563,7 @@ } declare @llvm.riscv.vfsqrt.nxv4f64( + , , iXLen); @@ -548,6 +575,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv4f64( + undef, %0, iXLen %1) @@ -578,6 +606,7 @@ } declare @llvm.riscv.vfsqrt.nxv8f64( + , , iXLen); @@ -589,6 +618,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsqrt.nxv8f64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret 
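One detail worth noting for the widening conversions, shown as a sketch with assumed scalable types and i64 in place of iXLen: the new passthrough operand has the wider result type, not the narrow source type.

declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
  <vscale x 4 x float>,  ; passthrough, same type as the widened result
  <vscale x 4 x half>,   ; narrow source operand
  i64)                   ; vl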
entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare 
@llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + undef, %0, iXLen %1) @@ -424,6 +444,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + , , iXLen); @@ -436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + undef, %0, iXLen %1) @@ -466,6 +488,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + undef, %0, iXLen %1) @@ -382,6 +400,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + , , iXLen); @@ -394,6 +413,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + undef, %0, iXLen %1) @@ -424,6 +444,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + , , iXLen); @@ -436,6 +457,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + undef, %0, iXLen %1) @@ -466,6 +488,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + , , iXLen); @@ -478,6 +501,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + undef, %0, iXLen %1) @@ -508,6 +532,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + , , iXLen); @@ -520,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + undef, %0, iXLen %1) @@ -550,6 +576,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + , , iXLen); @@ -562,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + undef, %0, iXLen %1) @@ -592,6 +620,7 @@ } declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + , , iXLen); @@ -604,6 +633,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( + , , iXLen); @@ 
-226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + undef, %0, iXLen %1) @@ -46,6 +48,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + , , iXLen); @@ -58,6 +61,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + undef, %0, iXLen %1) @@ -88,6 +92,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + , , iXLen); @@ -100,6 +105,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + undef, %0, iXLen %1) @@ -130,6 +136,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + , , iXLen); @@ -142,6 +149,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + undef, %0, iXLen %1) @@ -172,6 +180,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + , , iXLen); @@ -184,6 +193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + undef, %0, iXLen %1) @@ -214,6 +224,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + , , iXLen); @@ -226,6 +237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + undef, %0, iXLen %1) @@ -256,6 +268,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + , , iXLen); @@ -268,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + undef, %0, iXLen %1) @@ -298,6 +312,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + , , iXLen); @@ -310,6 +325,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + undef, %0, iXLen %1) @@ -340,6 +356,7 @@ } declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + , , iXLen); @@ -352,6 +369,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vid.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll --- a/llvm/test/CodeGen/RISCV/rvv/vid.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vid.nxv1i8( + , iXLen); define @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind { @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i8( + undef, iXLen %0) ret %a @@ -40,6 +42,7 @@ } declare @llvm.riscv.vid.nxv2i8( + , iXLen); define @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind { @@ -50,6 +53,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i8( + undef, iXLen %0) ret %a @@ -76,6 +80,7 @@ } declare @llvm.riscv.vid.nxv4i8( + , iXLen); define @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind { @@ -86,6 +91,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i8( + undef, iXLen %0) ret %a @@ -112,6 +118,7 @@ } declare @llvm.riscv.vid.nxv8i8( + , iXLen); define @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind { @@ -122,6 +129,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i8( + undef, iXLen %0) ret %a @@ -148,6 +156,7 @@ } declare @llvm.riscv.vid.nxv16i8( + , iXLen); define @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind { @@ -158,6 +167,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i8( + undef, iXLen %0) ret %a @@ -184,6 +194,7 @@ } declare @llvm.riscv.vid.nxv32i8( + , iXLen); define @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind { @@ -194,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv32i8( + undef, iXLen %0) ret %a @@ -220,6 +232,7 @@ } declare @llvm.riscv.vid.nxv1i16( + , iXLen); define @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind { @@ -230,6 +243,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i16( + undef, iXLen %0) ret %a @@ -256,6 +270,7 @@ } declare @llvm.riscv.vid.nxv2i16( + , iXLen); define @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind { @@ -266,6 +281,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i16( + undef, iXLen %0) ret %a @@ -292,6 +308,7 @@ } declare @llvm.riscv.vid.nxv4i16( + , iXLen); define @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind { @@ -302,6 +319,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i16( + undef, iXLen %0) ret %a @@ -328,6 +346,7 @@ } declare @llvm.riscv.vid.nxv8i16( + , iXLen); define @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind { @@ -338,6 +357,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i16( + undef, iXLen %0) ret %a @@ -364,6 +384,7 @@ } declare @llvm.riscv.vid.nxv16i16( + , iXLen); define @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind { @@ -374,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i16( + undef, iXLen %0) ret %a @@ -400,6 +422,7 @@ } declare @llvm.riscv.vid.nxv32i16( + , iXLen); define @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind { @@ -410,6 +433,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv32i16( + undef, iXLen %0) ret %a @@ -436,6 +460,7 @@ } declare @llvm.riscv.vid.nxv1i32( + , iXLen); define @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind { @@ -446,6 +471,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i32( + undef, iXLen %0) ret %a @@ -472,6 +498,7 @@ } declare @llvm.riscv.vid.nxv2i32( + , iXLen); define @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind { @@ -482,6 +509,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i32( + undef, iXLen %0) ret %a @@ -508,6 +536,7 @@ } declare @llvm.riscv.vid.nxv4i32( + , iXLen); define @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind { @@ -518,6 +547,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i32( + undef, iXLen %0) ret %a @@ -544,6 +574,7 @@ } declare @llvm.riscv.vid.nxv8i32( + , iXLen); define @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind { @@ -554,6 +585,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i32( + undef, iXLen %0) ret %a @@ -580,6 +612,7 @@ } declare @llvm.riscv.vid.nxv16i32( + , iXLen); define @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind { @@ -590,6 +623,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i32( + undef, iXLen %0) ret %a @@ -616,6 +650,7 @@ } declare @llvm.riscv.vid.nxv1i64( + , iXLen); define 
@intrinsic_vid_v_nxv1i64(iXLen %0) nounwind { @@ -626,6 +661,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i64( + undef, iXLen %0) ret %a @@ -652,6 +688,7 @@ } declare @llvm.riscv.vid.nxv2i64( + , iXLen); define @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind { @@ -662,6 +699,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i64( + undef, iXLen %0) ret %a @@ -688,6 +726,7 @@ } declare @llvm.riscv.vid.nxv4i64( + , iXLen); define @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind { @@ -698,6 +737,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i64( + undef, iXLen %0) ret %a @@ -724,6 +764,7 @@ } declare @llvm.riscv.vid.nxv8i64( + , iXLen); define @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind { @@ -734,6 +775,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i64( + undef, iXLen %0) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/viota.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll --- a/llvm/test/CodeGen/RISCV/rvv/viota.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.viota.nxv1i8( + , , iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv1i8( + undef, %0, iXLen %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.viota.nxv2i8( + , , iXLen); @@ -55,6 +58,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv2i8( + undef, %0, iXLen %1) @@ -84,6 +88,7 @@ } declare @llvm.riscv.viota.nxv4i8( + , , iXLen); @@ -95,6 +100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv4i8( + undef, %0, iXLen %1) @@ -124,6 +130,7 @@ } declare @llvm.riscv.viota.nxv8i8( + , , iXLen); @@ -135,6 +142,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv8i8( + undef, %0, iXLen %1) @@ -164,6 +172,7 @@ } declare @llvm.riscv.viota.nxv16i8( + , , iXLen); @@ -175,6 +184,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv16i8( + undef, %0, iXLen %1) @@ -204,6 +214,7 @@ } declare @llvm.riscv.viota.nxv32i8( + , , iXLen); @@ -215,6 +226,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv32i8( + undef, %0, iXLen %1) @@ -244,6 +256,7 @@ } declare @llvm.riscv.viota.nxv64i8( + , , iXLen); @@ -255,6 +268,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv64i8( + undef, %0, iXLen %1) @@ -284,6 +298,7 @@ } declare @llvm.riscv.viota.nxv1i16( + , , iXLen); @@ -295,6 +310,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv1i16( + undef, %0, iXLen %1) @@ -324,6 +340,7 @@ } declare @llvm.riscv.viota.nxv2i16( + , , iXLen); @@ -335,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv2i16( + undef, %0, iXLen %1) @@ -364,6 +382,7 @@ } declare @llvm.riscv.viota.nxv4i16( + , , iXLen); @@ -375,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv4i16( + undef, %0, iXLen %1) @@ -404,6 +424,7 @@ } declare @llvm.riscv.viota.nxv8i16( + , , iXLen); @@ -415,6 +436,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv8i16( + undef, %0, iXLen %1) @@ -444,6 +466,7 @@ } declare @llvm.riscv.viota.nxv16i16( + , , iXLen); @@ -455,6 +478,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv16i16( + undef, %0, iXLen %1) @@ -484,6 +508,7 @@ } declare @llvm.riscv.viota.nxv32i16( + , , iXLen); @@ -495,6 +520,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv32i16( + undef, %0, iXLen %1) @@ -524,6 +550,7 @@ } declare @llvm.riscv.viota.nxv1i32( + , , iXLen); @@ -535,6 +562,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv1i32( + undef, %0, iXLen 
%1) @@ -564,6 +592,7 @@ } declare @llvm.riscv.viota.nxv2i32( + , , iXLen); @@ -575,6 +604,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv2i32( + undef, %0, iXLen %1) @@ -604,6 +634,7 @@ } declare @llvm.riscv.viota.nxv4i32( + , , iXLen); @@ -615,6 +646,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv4i32( + undef, %0, iXLen %1) @@ -644,6 +676,7 @@ } declare @llvm.riscv.viota.nxv8i32( + , , iXLen); @@ -655,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv8i32( + undef, %0, iXLen %1) @@ -684,6 +718,7 @@ } declare @llvm.riscv.viota.nxv16i32( + , , iXLen); @@ -695,6 +730,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv16i32( + undef, %0, iXLen %1) @@ -724,6 +760,7 @@ } declare @llvm.riscv.viota.nxv1i64( + , , iXLen); @@ -735,6 +772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv1i64( + undef, %0, iXLen %1) @@ -764,6 +802,7 @@ } declare @llvm.riscv.viota.nxv2i64( + , , iXLen); @@ -775,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv2i64( + undef, %0, iXLen %1) @@ -804,6 +844,7 @@ } declare @llvm.riscv.viota.nxv4i64( + , , iXLen); @@ -815,6 +856,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv4i64( + undef, %0, iXLen %1) @@ -844,6 +886,7 @@ } declare @llvm.riscv.viota.nxv8i64( + , , iXLen); @@ -855,6 +898,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.viota.nxv8i64( + undef, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -34,11 +34,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %b = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( %a, i64 %2) + %b = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, %a, i64 %2) br label %if.end if.else: ; preds = %entry - %c = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( %a, i64 %2) + %c = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( undef, %a, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then @@ -140,10 +140,10 @@ declare void @llvm.riscv.vse.nxv1i64.i64(, * nocapture, i64) #4 ; Function Attrs: nounwind readnone - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, i64) #1 + declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 ; Function Attrs: nounwind readnone - declare @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(, i64) #1 + declare @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(, , i64) #1 attributes #0 = { "target-features"="+v" } attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -24,7 +24,7 @@ define @load_zext(* %0, i64 %1) #0 { entry: %a = call @llvm.riscv.vle.nxv1i32.i64( undef, * %0, i64 %1) - %b = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( %a, i64 %1) + %b = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, %a, i64 %1) ret %b } @@ -82,7 +82,7 @@ declare @llvm.riscv.vle.nxv1i32.i64(, * nocapture, i64) #4 ; Function Attrs: nounwind readnone - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, i64) #1 + declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 attributes #0 = { "target-features"="+v" } attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsext.nxv1i64.nxv1i8( + , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( + undef, %0, i32 %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.vsext.nxv2i64.nxv2i8( + , , i32); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( + undef, %0, i32 %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vsext.nxv4i64.nxv4i8( + , , i32); @@ -98,6 +103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( + undef, %0, i32 %1) @@ -128,6 +134,7 @@ } declare @llvm.riscv.vsext.nxv8i64.nxv8i8( + , , i32); @@ -140,6 +147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( + undef, %0, i32 %1) @@ -170,6 +178,7 @@ } declare @llvm.riscv.vsext.nxv1i64.nxv1i16( + , , i32); @@ -182,6 +191,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( + undef, %0, i32 %1) @@ -212,6 +222,7 @@ } declare @llvm.riscv.vsext.nxv2i64.nxv2i16( + , , i32); @@ -224,6 +235,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( + undef, %0, i32 %1) @@ -254,6 +266,7 @@ } declare @llvm.riscv.vsext.nxv4i64.nxv4i16( + , , i32); @@ -266,6 +279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( + undef, %0, i32 %1) @@ -296,6 +310,7 @@ } declare @llvm.riscv.vsext.nxv8i64.nxv8i16( + , , i32); @@ -308,6 +323,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( + undef, %0, i32 %1) @@ -338,6 +354,7 @@ } declare @llvm.riscv.vsext.nxv1i32.nxv1i8( + , , i32); @@ -350,6 +367,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i32.nxv1i8( + undef, %0, i32 %1) @@ -380,6 +398,7 @@ } declare @llvm.riscv.vsext.nxv2i32.nxv2i8( + , , i32); @@ -392,6 +411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( + undef, %0, i32 %1) @@ -422,6 +442,7 @@ } declare @llvm.riscv.vsext.nxv4i32.nxv4i8( + , , i32); @@ -434,6 +455,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( + undef, %0, i32 %1) @@ -464,6 +486,7 @@ } declare @llvm.riscv.vsext.nxv8i32.nxv8i8( + , , i32); @@ -476,6 +499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i8( + undef, %0, i32 %1) @@ -506,6 +530,7 @@ } declare @llvm.riscv.vsext.nxv16i32.nxv16i8( + , , i32); @@ -518,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i8( + undef, %0, i32 %1) @@ -548,6 +574,7 @@ } declare @llvm.riscv.vsext.nxv1i32.nxv1i16( + , , i32); @@ -560,6 +587,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i32.nxv1i16( + undef, %0, i32 %1) @@ -590,6 +618,7 @@ } declare @llvm.riscv.vsext.nxv2i32.nxv2i16( + , , i32); @@ -602,6 +631,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( + undef, %0, i32 %1) @@ -632,6 +662,7 @@ } declare @llvm.riscv.vsext.nxv4i32.nxv4i16( + , , i32); @@ -644,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( + undef, %0, i32 %1) @@ -674,6 +706,7 @@ } declare @llvm.riscv.vsext.nxv8i32.nxv8i16( + , , i32); @@ -686,6 +719,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i16( + undef, %0, i32 %1) @@ -716,6 +750,7 @@ } declare @llvm.riscv.vsext.nxv16i32.nxv16i16( + , , i32); @@ -728,6 +763,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( + undef, %0, i32 %1) @@ -758,6 +794,7 @@ } declare 
@llvm.riscv.vsext.nxv1i16.nxv1i8( + , , i32); @@ -770,6 +807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i16.nxv1i8( + undef, %0, i32 %1) @@ -800,6 +838,7 @@ } declare @llvm.riscv.vsext.nxv2i16.nxv2i8( + , , i32); @@ -812,6 +851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i16.nxv2i8( + undef, %0, i32 %1) @@ -842,6 +882,7 @@ } declare @llvm.riscv.vsext.nxv4i16.nxv4i8( + , , i32); @@ -854,6 +895,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( + undef, %0, i32 %1) @@ -884,6 +926,7 @@ } declare @llvm.riscv.vsext.nxv8i16.nxv8i8( + , , i32); @@ -896,6 +939,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( + undef, %0, i32 %1) @@ -926,6 +970,7 @@ } declare @llvm.riscv.vsext.nxv16i16.nxv16i8( + , , i32); @@ -938,6 +983,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( + undef, %0, i32 %1) @@ -968,6 +1014,7 @@ } declare @llvm.riscv.vsext.nxv32i16.nxv32i8( + , , i32); @@ -980,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( + undef, %0, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsext.nxv1i64.nxv1i8( + , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( + undef, %0, i64 %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.vsext.nxv2i64.nxv2i8( + , , i64); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( + undef, %0, i64 %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vsext.nxv4i64.nxv4i8( + , , i64); @@ -98,6 +103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( + undef, %0, i64 %1) @@ -128,6 +134,7 @@ } declare @llvm.riscv.vsext.nxv8i64.nxv8i8( + , , i64); @@ -140,6 +147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( + undef, %0, i64 %1) @@ -170,6 +178,7 @@ } declare @llvm.riscv.vsext.nxv1i64.nxv1i16( + , , i64); @@ -182,6 +191,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( + undef, %0, i64 %1) @@ -212,6 +222,7 @@ } declare @llvm.riscv.vsext.nxv2i64.nxv2i16( + , , i64); @@ -224,6 +235,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( + undef, %0, i64 %1) @@ -254,6 +266,7 @@ } declare @llvm.riscv.vsext.nxv4i64.nxv4i16( + , , i64); @@ -266,6 +279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( + undef, %0, i64 %1) @@ -296,6 +310,7 @@ } declare @llvm.riscv.vsext.nxv8i64.nxv8i16( + , , i64); @@ -308,6 +323,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( + undef, %0, i64 %1) @@ -338,6 +354,7 @@ } declare @llvm.riscv.vsext.nxv1i32.nxv1i8( + , , i64); @@ -350,6 +367,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i32.nxv1i8( + undef, %0, i64 %1) @@ -380,6 +398,7 @@ } declare @llvm.riscv.vsext.nxv2i32.nxv2i8( + , , i64); @@ -392,6 +411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( + undef, %0, i64 %1) @@ -422,6 +442,7 @@ } declare @llvm.riscv.vsext.nxv4i32.nxv4i8( + , , i64); @@ -434,6 +455,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( + undef, %0, i64 %1) @@ -464,6 +486,7 @@ } declare @llvm.riscv.vsext.nxv8i32.nxv8i8( + , , i64); @@ -476,6 +499,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsext.nxv8i32.nxv8i8( + undef, %0, i64 %1) @@ -506,6 +530,7 @@ } declare @llvm.riscv.vsext.nxv16i32.nxv16i8( + , , i64); @@ -518,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i8( + undef, %0, i64 %1) @@ -548,6 +574,7 @@ } declare @llvm.riscv.vsext.nxv1i64.nxv1i32( + , , i64); @@ -560,6 +587,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i32( + undef, %0, i64 %1) @@ -590,6 +618,7 @@ } declare @llvm.riscv.vsext.nxv2i64.nxv2i32( + , , i64); @@ -602,6 +631,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i32( + undef, %0, i64 %1) @@ -632,6 +662,7 @@ } declare @llvm.riscv.vsext.nxv4i64.nxv4i32( + , , i64); @@ -644,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i32( + undef, %0, i64 %1) @@ -674,6 +706,7 @@ } declare @llvm.riscv.vsext.nxv8i64.nxv8i32( + , , i64); @@ -686,6 +719,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i32( + undef, %0, i64 %1) @@ -716,6 +750,7 @@ } declare @llvm.riscv.vsext.nxv1i32.nxv1i16( + , , i64); @@ -728,6 +763,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i32.nxv1i16( + undef, %0, i64 %1) @@ -758,6 +794,7 @@ } declare @llvm.riscv.vsext.nxv2i32.nxv2i16( + , , i64); @@ -770,6 +807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( + undef, %0, i64 %1) @@ -800,6 +838,7 @@ } declare @llvm.riscv.vsext.nxv4i32.nxv4i16( + , , i64); @@ -812,6 +851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( + undef, %0, i64 %1) @@ -842,6 +882,7 @@ } declare @llvm.riscv.vsext.nxv8i32.nxv8i16( + , , i64); @@ -854,6 +895,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i16( + undef, %0, i64 %1) @@ -884,6 +926,7 @@ } declare @llvm.riscv.vsext.nxv16i32.nxv16i16( + , , i64); @@ -896,6 +939,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( + undef, %0, i64 %1) @@ -926,6 +970,7 @@ } declare @llvm.riscv.vsext.nxv1i16.nxv1i8( + , , i64); @@ -938,6 +983,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i16.nxv1i8( + undef, %0, i64 %1) @@ -968,6 +1014,7 @@ } declare @llvm.riscv.vsext.nxv2i16.nxv2i8( + , , i64); @@ -980,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i16.nxv2i8( + undef, %0, i64 %1) @@ -1010,6 +1058,7 @@ } declare @llvm.riscv.vsext.nxv4i16.nxv4i8( + , , i64); @@ -1022,6 +1071,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( + undef, %0, i64 %1) @@ -1052,6 +1102,7 @@ } declare @llvm.riscv.vsext.nxv8i16.nxv8i8( + , , i64); @@ -1064,6 +1115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( + undef, %0, i64 %1) @@ -1094,6 +1146,7 @@ } declare @llvm.riscv.vsext.nxv16i16.nxv16i8( + , , i64); @@ -1106,6 +1159,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( + undef, %0, i64 %1) @@ -1136,6 +1190,7 @@ } declare @llvm.riscv.vsext.nxv32i16.nxv32i8( + , , i64); @@ -1148,6 +1203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( + undef, %0, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vzext.nxv1i64.nxv1i8( + , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8( + undef, %0, i32 %1) @@ -44,6 
+46,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i8( + , , i32); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8( + undef, %0, i32 %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i8( + , , i32); @@ -98,6 +103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8( + undef, %0, i32 %1) @@ -128,6 +134,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i8( + , , i32); @@ -140,6 +147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8( + undef, %0, i32 %1) @@ -170,6 +178,7 @@ } declare @llvm.riscv.vzext.nxv1i64.nxv1i16( + , , i32); @@ -182,6 +191,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16( + undef, %0, i32 %1) @@ -212,6 +222,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i16( + , , i32); @@ -224,6 +235,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16( + undef, %0, i32 %1) @@ -254,6 +266,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i16( + , , i32); @@ -266,6 +279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16( + undef, %0, i32 %1) @@ -296,6 +310,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i16( + , , i32); @@ -308,6 +323,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16( + undef, %0, i32 %1) @@ -338,6 +354,7 @@ } declare @llvm.riscv.vzext.nxv1i32.nxv1i8( + , , i32); @@ -350,6 +367,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i32.nxv1i8( + undef, %0, i32 %1) @@ -380,6 +398,7 @@ } declare @llvm.riscv.vzext.nxv2i32.nxv2i8( + , , i32); @@ -392,6 +411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8( + undef, %0, i32 %1) @@ -422,6 +442,7 @@ } declare @llvm.riscv.vzext.nxv4i32.nxv4i8( + , , i32); @@ -434,6 +455,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8( + undef, %0, i32 %1) @@ -464,6 +486,7 @@ } declare @llvm.riscv.vzext.nxv8i32.nxv8i8( + , , i32); @@ -476,6 +499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8( + undef, %0, i32 %1) @@ -506,6 +530,7 @@ } declare @llvm.riscv.vzext.nxv16i32.nxv16i8( + , , i32); @@ -518,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8( + undef, %0, i32 %1) @@ -548,6 +574,7 @@ } declare @llvm.riscv.vzext.nxv1i64.nxv1i32( + , , i32); @@ -560,6 +587,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32( + undef, %0, i32 %1) @@ -590,6 +618,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i32( + , , i32); @@ -602,6 +631,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32( + undef, %0, i32 %1) @@ -632,6 +662,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i32( + , , i32); @@ -644,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32( + undef, %0, i32 %1) @@ -674,6 +706,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i32( + , , i32); @@ -686,6 +719,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32( + undef, %0, i32 %1) @@ -716,6 +750,7 @@ } declare @llvm.riscv.vzext.nxv1i32.nxv1i16( + , , i32); @@ -728,6 +763,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i32.nxv1i16( + undef, %0, i32 %1) @@ -758,6 +794,7 @@ } declare @llvm.riscv.vzext.nxv2i32.nxv2i16( + , , i32); @@ -770,6 +807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16( + undef, %0, i32 %1) @@ -800,6 +838,7 @@ } declare @llvm.riscv.vzext.nxv4i32.nxv4i16( + , , i32); @@ -812,6 +851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16( + undef, %0, i32 %1) @@ -842,6 
+882,7 @@ } declare @llvm.riscv.vzext.nxv8i32.nxv8i16( + , , i32); @@ -854,6 +895,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16( + undef, %0, i32 %1) @@ -884,6 +926,7 @@ } declare @llvm.riscv.vzext.nxv16i32.nxv16i16( + , , i32); @@ -896,6 +939,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16( + undef, %0, i32 %1) @@ -926,6 +970,7 @@ } declare @llvm.riscv.vzext.nxv1i16.nxv1i8( + , , i32); @@ -938,6 +983,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i16.nxv1i8( + undef, %0, i32 %1) @@ -968,6 +1014,7 @@ } declare @llvm.riscv.vzext.nxv2i16.nxv2i8( + , , i32); @@ -980,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i16.nxv2i8( + undef, %0, i32 %1) @@ -1010,6 +1058,7 @@ } declare @llvm.riscv.vzext.nxv4i16.nxv4i8( + , , i32); @@ -1022,6 +1071,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8( + undef, %0, i32 %1) @@ -1052,6 +1102,7 @@ } declare @llvm.riscv.vzext.nxv8i16.nxv8i8( + , , i32); @@ -1064,6 +1115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8( + undef, %0, i32 %1) @@ -1094,6 +1146,7 @@ } declare @llvm.riscv.vzext.nxv16i16.nxv16i8( + , , i32); @@ -1106,6 +1159,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8( + undef, %0, i32 %1) @@ -1136,6 +1190,7 @@ } declare @llvm.riscv.vzext.nxv32i16.nxv32i8( + , , i32); @@ -1148,6 +1203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8( + undef, %0, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vzext.nxv1i64.nxv1i8( + , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8( + undef, %0, i64 %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i8( + , , i64); @@ -56,6 +59,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8( + undef, %0, i64 %1) @@ -86,6 +90,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i8( + , , i64); @@ -98,6 +103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8( + undef, %0, i64 %1) @@ -128,6 +134,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i8( + , , i64); @@ -140,6 +147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8( + undef, %0, i64 %1) @@ -170,6 +178,7 @@ } declare @llvm.riscv.vzext.nxv1i64.nxv1i16( + , , i64); @@ -182,6 +191,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16( + undef, %0, i64 %1) @@ -212,6 +222,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i16( + , , i64); @@ -224,6 +235,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16( + undef, %0, i64 %1) @@ -254,6 +266,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i16( + , , i64); @@ -266,6 +279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16( + undef, %0, i64 %1) @@ -296,6 +310,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i16( + , , i64); @@ -308,6 +323,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16( + undef, %0, i64 %1) @@ -338,6 +354,7 @@ } declare @llvm.riscv.vzext.nxv1i32.nxv1i8( + , , i64); @@ -350,6 +367,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i32.nxv1i8( + undef, %0, i64 %1) @@ -380,6 +398,7 @@ } declare @llvm.riscv.vzext.nxv2i32.nxv2i8( + , , i64); @@ -392,6 
+411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8( + undef, %0, i64 %1) @@ -422,6 +442,7 @@ } declare @llvm.riscv.vzext.nxv4i32.nxv4i8( + , , i64); @@ -434,6 +455,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8( + undef, %0, i64 %1) @@ -464,6 +486,7 @@ } declare @llvm.riscv.vzext.nxv8i32.nxv8i8( + , , i64); @@ -476,6 +499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8( + undef, %0, i64 %1) @@ -506,6 +530,7 @@ } declare @llvm.riscv.vzext.nxv16i32.nxv16i8( + , , i64); @@ -518,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8( + undef, %0, i64 %1) @@ -548,6 +574,7 @@ } declare @llvm.riscv.vzext.nxv1i64.nxv1i32( + , , i64); @@ -560,6 +587,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32( + undef, %0, i64 %1) @@ -590,6 +618,7 @@ } declare @llvm.riscv.vzext.nxv2i64.nxv2i32( + , , i64); @@ -602,6 +631,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32( + undef, %0, i64 %1) @@ -632,6 +662,7 @@ } declare @llvm.riscv.vzext.nxv4i64.nxv4i32( + , , i64); @@ -644,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32( + undef, %0, i64 %1) @@ -674,6 +706,7 @@ } declare @llvm.riscv.vzext.nxv8i64.nxv8i32( + , , i64); @@ -686,6 +719,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32( + undef, %0, i64 %1) @@ -716,6 +750,7 @@ } declare @llvm.riscv.vzext.nxv1i32.nxv1i16( + , , i64); @@ -728,6 +763,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i32.nxv1i16( + undef, %0, i64 %1) @@ -758,6 +794,7 @@ } declare @llvm.riscv.vzext.nxv2i32.nxv2i16( + , , i64); @@ -770,6 +807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16( + undef, %0, i64 %1) @@ -800,6 +838,7 @@ } declare @llvm.riscv.vzext.nxv4i32.nxv4i16( + , , i64); @@ -812,6 +851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16( + undef, %0, i64 %1) @@ -842,6 +882,7 @@ } declare @llvm.riscv.vzext.nxv8i32.nxv8i16( + , , i64); @@ -854,6 +895,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16( + undef, %0, i64 %1) @@ -884,6 +926,7 @@ } declare @llvm.riscv.vzext.nxv16i32.nxv16i16( + , , i64); @@ -896,6 +939,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16( + undef, %0, i64 %1) @@ -926,6 +970,7 @@ } declare @llvm.riscv.vzext.nxv1i16.nxv1i8( + , , i64); @@ -938,6 +983,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i16.nxv1i8( + undef, %0, i64 %1) @@ -968,6 +1014,7 @@ } declare @llvm.riscv.vzext.nxv2i16.nxv2i8( + , , i64); @@ -980,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i16.nxv2i8( + undef, %0, i64 %1) @@ -1010,6 +1058,7 @@ } declare @llvm.riscv.vzext.nxv4i16.nxv4i8( + , , i64); @@ -1022,6 +1071,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8( + undef, %0, i64 %1) @@ -1052,6 +1102,7 @@ } declare @llvm.riscv.vzext.nxv8i16.nxv8i8( + , , i64); @@ -1064,6 +1115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8( + undef, %0, i64 %1) @@ -1094,6 +1146,7 @@ } declare @llvm.riscv.vzext.nxv16i16.nxv16i8( + , , i64); @@ -1106,6 +1159,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8( + undef, %0, i64 %1) @@ -1136,6 +1190,7 @@ } declare @llvm.riscv.vzext.nxv32i16.nxv32i8( + , , i64); @@ -1148,6 +1203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8( + undef, %0, i64 %1)
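For readers of the test changes above, here is a minimal IR sketch of the updated unmasked signature, written against the post-patch form shown in these tests; the helper function names and the tail-undisturbed variant are illustrative assumptions, not part of the patch, and the tail behaviour is described under the usual convention that an undef passthru requests the tail-agnostic form.

declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,   ; passthru operand added by this change
  <vscale x 1 x i32>,   ; source vector
  i64)                  ; vl

; Tail-agnostic use, matching the updated tests: the passthru is undef.
define <vscale x 1 x i64> @sext_ta(<vscale x 1 x i32> %src, i64 %vl) {
  %r = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
      <vscale x 1 x i64> undef, <vscale x 1 x i32> %src, i64 %vl)
  ret <vscale x 1 x i64> %r
}

; Hypothetical tail-undisturbed use: elements past vl are expected to be
; taken from %passthru rather than left unspecified.
define <vscale x 1 x i64> @sext_tu(<vscale x 1 x i64> %passthru,
                                   <vscale x 1 x i32> %src, i64 %vl) {
  %r = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
      <vscale x 1 x i64> %passthru, <vscale x 1 x i32> %src, i64 %vl)
  ret <vscale x 1 x i64> %r
}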