diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1755,7 +1755,7 @@
 }
 // 12.16. Vector Integer Move Instructions
-let HasMask = false, HasPolicy = false in {
+let HasMask = false, HasNoMaskPolicy = true, HasPolicy = false in {
   let MangledName = "vmv_v" in {
     defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil", [["v", "Uv", "UvUv"]]>;
@@ -1890,7 +1890,7 @@
 }
 // 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
+let HasMask = false, HasNoMaskPolicy = true, HasNoMaskedOverloaded = false, HasPolicy = false in
   defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd", [["f", "v", "ve"]]>;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
@@ -16,7 +16,7 @@
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
@@ -25,7 +25,7 @@
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
@@ -34,35 +34,35 @@
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); }
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); }
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); }
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { return vmv_v(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: 
@test_vmv_v_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t 
vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f32.i64( undef, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f32.i64( undef, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f32.i64( undef, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f32.i64( undef, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f32.i64( undef, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f64.i64( undef, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) { @@ -62,7 +62,7 @@ // 
CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f64.i64( undef, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f64.i64( undef, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f64.i64(double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f64.i64( undef, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f16.i64( undef, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f16.i64( undef, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f16.i64( undef, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f16.i64( undef, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f16.i64( undef, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64( undef, 
half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: 
@test_vmv_v_x_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_x_i16m8(int16_t src, 
size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( undef, i64 [[SRC:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( undef, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.v.x.nxv2i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( undef, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( 
[[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { @@ -647,7 +647,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) { @@ -656,7 +656,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { @@ -665,7 +665,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) { @@ -674,7 +674,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { @@ -683,7 +683,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) { @@ -692,7 +692,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { @@ -701,7 +701,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) { @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( undef, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) { @@ -728,7 +728,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { @@ -737,7 +737,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) { @@ -746,7 +746,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { @@ -755,7 +755,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) { @@ -764,7 +764,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { @@ -773,7 +773,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) { @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { @@ -791,7 +791,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_x_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( undef, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) { @@ -800,7 +800,7 @@ // 
CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { @@ -809,7 +809,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { @@ -827,7 +827,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { @@ -836,7 +836,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { @@ -845,7 +845,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { @@ -854,7 +854,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { @@ -872,7 +872,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { @@ -1645,7 +1645,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) { @@ -1654,7 +1654,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) { @@ -1663,7 +1663,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) { @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) { @@ -1681,7 +1681,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) { @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vmv_v_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) { diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1266,20 +1266,29 @@ defm vmerge : RISCVBinaryWithV0; + // Output: (vector) + // Input: (passthru, vector_in, vl) def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } + // Output: (vector) + // Input: (passthru, scalar, vl) def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty], - [LLVMVectorElementType<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMVectorElementType<0>, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } + // Output: (vector) + // Input: (passthru, scalar, vl) def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty], - [LLVMVectorElementType<0>,llvm_anyint_ty], + [LLVMMatchType<0>, LLVMVectorElementType<0>, + llvm_anyint_ty], [IntrNoMem]>, 
RISCVVIntrinsic {
-    let VLOperand = 1;
+    let VLOperand = 2;
   }
 
   def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -56,7 +56,8 @@
           VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
       SDLoc DL(N);
       SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
-      SDValue Result = CurDAG->getNode(Opc, DL, VT, N->getOperand(0), VL);
+      SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
+                                       N->getOperand(0), VL);
 
       --I;
       CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
@@ -71,11 +72,12 @@
     if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
       continue;
 
-    assert(N->getNumOperands() == 3 && "Unexpected number of operands");
+    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
     MVT VT = N->getSimpleValueType(0);
-    SDValue Lo = N->getOperand(0);
-    SDValue Hi = N->getOperand(1);
-    SDValue VL = N->getOperand(2);
+    SDValue Passthru = N->getOperand(0);
+    SDValue Lo = N->getOperand(1);
+    SDValue Hi = N->getOperand(2);
+    SDValue VL = N->getOperand(3);
     assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
            Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
            "Unexpected VTs!");
@@ -106,7 +108,7 @@
         CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
     SDValue Ops[] = {Chain,
                      IntID,
-                     CurDAG->getUNDEF(VT),
+                     Passthru,
                      StackSlot,
                      CurDAG->getRegister(RISCV::X0, MVT::i64),
                      VL};
@@ -1624,9 +1626,10 @@
     // Try to match splat of a scalar load to a strided load with stride of x0.
     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
-    if (IsScalarMove && !Node->getOperand(0).isUndef())
+    bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
+    if (HasPassthruOperand && !IsScalarMove && !Node->getOperand(0).isUndef())
       break;
-    SDValue Src = IsScalarMove ? Node->getOperand(1) : Node->getOperand(0);
+    SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
     auto *Ld = dyn_cast<LoadSDNode>(Src);
     if (!Ld)
       break;
@@ -1648,7 +1651,7 @@
         break;
       selectVLOp(Node->getOperand(2), VL);
     } else
-      selectVLOp(Node->getOperand(1), VL);
+      selectVLOp(Node->getOperand(2), VL);
 
     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
@@ -1924,9 +1927,9 @@
 }
 
 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL)
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
     return false;
-  SplatVal = N.getOperand(0);
+  SplatVal = N.getOperand(1);
   return true;
 }
 
@@ -1936,11 +1939,12 @@
                                   SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget,
                                   ValidateFn ValidateImm) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL ||
-      !isa<ConstantSDNode>(N.getOperand(0)))
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
+      !isa<ConstantSDNode>(N.getOperand(1)))
     return false;
 
-  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
+  int64_t SplatImm =
+      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
 
   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
   // type is wider than the resulting vector element type: an implicit
@@ -1950,7 +1954,7 @@
   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
   // sign-extending to (XLenVT -1).
   MVT XLenVT = Subtarget.getXLenVT();
-  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
+  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
          "Unexpected splat operand type");
   MVT EltVT = N.getSimpleValueType().getVectorElementType();
   if (EltVT.bitsLT(XLenVT))
@@ -1983,11 +1987,12 @@
 }
 
 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL ||
-      !isa<ConstantSDNode>(N.getOperand(0)))
+  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
+      !isa<ConstantSDNode>(N.getOperand(1)))
     return false;
 
-  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
+  int64_t SplatImm =
+      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
 
   if (!isUInt<5>(SplatImm))
     return false;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -129,10 +129,12 @@
   BFPW,
   // Vector Extension
   // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
-  // for the VL value to be used for the operation.
+  // for the VL value to be used for the operation. The first operand is
+  // the passthru operand.
   VMV_V_X_VL,
   // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand
-  // for the VL value to be used for the operation.
+  // for the VL value to be used for the operation. The first operand is
+  // the passthru operand.
   VFMV_V_F_VL,
   // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
   // extended from the vector element size.
@@ -143,6 +145,7 @@
   VFMV_S_F_VL,
   // Splats an 64-bit value that has been split into two i32 parts. This is
   // expanded late to two scalar stores and a stride 0 vector load.
+  // The first operand is the passthru operand.
   SPLAT_VECTOR_SPLIT_I64_VL,
   // Read VLENB CSR
   READ_VLENB,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1907,7 +1907,8 @@
     unsigned Opc =
         VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
-    SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
+    SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+                                Op.getOperand(0), VL);
     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
   }
 
@@ -2165,7 +2166,8 @@
       return Gather;
     unsigned Opc = VT.isFloatingPoint() ?
RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; - Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL); + Splat = + DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL); return convertFromScalableVector(VT, Splat, DAG, Subtarget); } @@ -2273,6 +2275,7 @@ getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget); SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT, + DAG.getUNDEF(ViaContainerVT), DAG.getConstant(SplatValue, DL, XLenVT), ViaVL); Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget); return DAG.getBitcast(VT, Splat); @@ -2360,15 +2363,19 @@ return SDValue(); } -static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo, - SDValue Hi, SDValue VL, SelectionDAG &DAG) { +static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, + SDValue Lo, SDValue Hi, SDValue VL, + SelectionDAG &DAG) { + bool HasPassthru = Passthru && !Passthru.isUndef(); + if (!HasPassthru && !Passthru) + Passthru = DAG.getUNDEF(VT); if (isa(Lo) && isa(Hi)) { int32_t LoC = cast(Lo)->getSExtValue(); int32_t HiC = cast(Hi)->getSExtValue(); // If Hi constant is all the same sign bit as Lo, lower this as a custom // node in order to try and match RVV vector/scalar instructions. if ((LoC >> 31) == HiC) - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); + return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL); // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use // vmv.v.x whose EEW = 32 to lower it. @@ -2377,41 +2384,46 @@ MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); // TODO: if vl <= min(VLMAX), we can also do this. But we could not // access the subtarget here now. - auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, + auto InterVec = DAG.getNode( + RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo, DAG.getRegister(RISCV::X0, MVT::i32)); return DAG.getNode(ISD::BITCAST, DL, VT, InterVec); } } // Fall back to a stack store and stride x0 vector load. - return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL); + return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo, + Hi, VL); } // Called by type legalization to handle splat of i64 on RV32. // FIXME: We can optimize this when the type has sign or zero bits in one // of the halves. -static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, - SDValue VL, SelectionDAG &DAG) { +static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, + SDValue Scalar, SDValue VL, + SelectionDAG &DAG) { assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!"); SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, DAG.getConstant(0, DL, MVT::i32)); SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, DAG.getConstant(1, DL, MVT::i32)); - return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG); + return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG); } // This function lowers a splat of a scalar operand Splat with the vector // length VL. It ensures the final sequence is type legal, which is useful when // lowering a splat after type legalization. 
-static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL, - SelectionDAG &DAG, +static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL, + MVT VT, SDLoc DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { + bool HasPassthru = Passthru && !Passthru.isUndef(); + if (!HasPassthru && !Passthru) + Passthru = DAG.getUNDEF(VT); if (VT.isFloatingPoint()) { // If VL is 1, we could use vfmv.s.f. if (isOneConstant(VL)) - return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT), - Scalar, VL); - return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL); + return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL); + return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL); } MVT XLenVT = Subtarget.getXLenVT(); @@ -2430,20 +2442,19 @@ // use vmv.s.x. if (isOneConstant(VL) && (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue()))) - return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar, - VL); - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL); + return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL); + return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL); } assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 && "Unexpected scalar for splat lowering!"); if (isOneConstant(VL) && isNullConstant(Scalar)) - return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), + return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, DAG.getConstant(0, DL, XLenVT), VL); // Otherwise use the more complicated splatting algorithm. - return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); + return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG); } // Is the mask a slidedown that shifts in undefs. @@ -2659,7 +2670,8 @@ unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; - SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL); + SDValue Splat = + DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL); return convertFromScalableVector(VT, Splat, DAG, Subtarget); } @@ -2768,6 +2780,7 @@ V2, TrueMask, VL); // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer. SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT, + DAG.getUNDEF(IntHalfVT), DAG.getAllOnesConstant(DL, XLenVT)); SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT, V2, Multiplier, TrueMask, VL); @@ -2871,7 +2884,8 @@ // TODO: This doesn't trigger for i64 vectors on RV32, since there we // encounter a bitcasted BUILD_VECTOR with low/high i32 values. if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) { - Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget); + Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG, + Subtarget); } else { V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); // If only one index is used, we can use a "splat" vrgather. @@ -4243,7 +4257,8 @@ std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); - SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG); + SDValue Res = + splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG); return convertFromScalableVector(VecVT, Res, DAG, Subtarget); } @@ -4253,19 +4268,20 @@ // If Hi constant is all the same sign bit as Lo, lower this as a custom // node in order to try and match RVV vector/scalar instructions. 
if ((LoC >> 31) == HiC) - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, Lo, - DAG.getRegister(RISCV::X0, MVT::i32)); + return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), + Lo, DAG.getRegister(RISCV::X0, MVT::i32)); } // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended. if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo && isa(Hi.getOperand(1)) && Hi.getConstantOperandVal(1) == 31) - return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, Lo, + return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo, DAG.getRegister(RISCV::X0, MVT::i32)); // Fall back to use a stack store and stride x0 vector load. Use X0 as VL. - return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi, + return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, + DAG.getUNDEF(VecVT), Lo, Hi, DAG.getRegister(RISCV::X0, MVT::i32)); } @@ -4298,10 +4314,12 @@ SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); } else { - SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatZero, - DAG.getRegister(RISCV::X0, XLenVT)); - SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatTrueVal, - DAG.getRegister(RISCV::X0, XLenVT)); + SplatZero = + DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), + SplatZero, DAG.getRegister(RISCV::X0, XLenVT)); + SplatTrueVal = + DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), + SplatTrueVal, DAG.getRegister(RISCV::X0, XLenVT)); } return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); @@ -4316,9 +4334,10 @@ SDValue Mask, VL; std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); - SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); - SplatTrueVal = - DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); + SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, + DAG.getUNDEF(ContainerVT), SplatZero, VL); + SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, + DAG.getUNDEF(ContainerVT), SplatTrueVal, VL); SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, SplatTrueVal, SplatZero, VL); @@ -4376,8 +4395,10 @@ SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); - SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); - SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); + SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, + DAG.getUNDEF(ContainerVT), SplatOne); + SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, + DAG.getUNDEF(ContainerVT), SplatZero); if (VecVT.isScalableVector()) { SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); @@ -4473,8 +4494,8 @@ SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT); // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied // undef doesn't obey the earlyclobber constraint. Just splat a zero value. - ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, - InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL); // First slide in the hi value, then the lo in underneath it. ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi, @@ -4654,7 +4675,7 @@ // be sign extended? 
SDValue VL = getVLOperand(Op); assert(VL.getValueType() == XLenVT); - ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG); + ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG); return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); } @@ -4723,10 +4744,11 @@ Op.getOperand(1)); case Intrinsic::riscv_vmv_v_x: return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2), - Op.getSimpleValueType(), DL, DAG, Subtarget); + Op.getOperand(3), Op.getSimpleValueType(), DL, DAG, + Subtarget); case Intrinsic::riscv_vfmv_v_f: return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(), - Op.getOperand(1), Op.getOperand(2)); + Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); case Intrinsic::riscv_vmv_s_x: { SDValue Scalar = Op.getOperand(2); @@ -4757,9 +4779,10 @@ SDValue Vec = Op.getOperand(1); SDValue VL = getVLOperand(Op); - SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); - SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, - DAG.getConstant(0, DL, MVT::i32), VL); + SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG); + SDValue SplattedIdx = + DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT), + DAG.getConstant(0, DL, MVT::i32), VL); MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); @@ -5134,8 +5157,9 @@ SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags()); - SDValue IdentitySplat = lowerScalarSplat( - NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget); + SDValue IdentitySplat = + lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT), + M1VT, DL, DAG, Subtarget); SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec, IdentitySplat, Mask, VL); SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, @@ -5196,8 +5220,9 @@ SDValue Mask, VL; std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); - SDValue ScalarSplat = lowerScalarSplat( - ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget); + SDValue ScalarSplat = + lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT), + M1VT, DL, DAG, Subtarget); SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), VectorVal, ScalarSplat, Mask, VL); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, @@ -5263,9 +5288,9 @@ MVT XLenVT = Subtarget.getXLenVT(); MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? 
VecEltVT : XLenVT; - SDValue StartSplat = - lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT, - DL, DAG, Subtarget); + SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0), + DAG.getConstant(1, DL, XLenVT), M1VT, + DL, DAG, Subtarget); SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL); SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction, @@ -5567,13 +5592,13 @@ if (StepValImm != 1) { if (isPowerOf2_64(StepValImm)) { SDValue StepVal = - DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, + DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT), DAG.getConstant(Log2_64(StepValImm), DL, XLenVT)); StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal); } else { SDValue StepVal = lowerScalarSplat( - DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT, - DL, DAG, Subtarget); + SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), + VL, VT, DL, DAG, Subtarget); StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal); } } @@ -5649,8 +5674,8 @@ if (!IsRV32E64) SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1); else - SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, VLMinus1, - DAG.getRegister(RISCV::X0, XLenVT)); + SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT), + VLMinus1, DAG.getRegister(RISCV::X0, XLenVT)); SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL); SDValue Indices = @@ -5904,9 +5929,9 @@ SDValue Mask, VL; std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); - SDValue SplatZero = - DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, - DAG.getConstant(0, DL, Subtarget.getXLenVT())); + SDValue SplatZero = DAG.getNode( + RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT), + DAG.getConstant(0, DL, Subtarget.getXLenVT())); SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL); SDValue Max = @@ -6818,6 +6843,7 @@ // To extract the upper XLEN bits of the vector element, shift the first // element right by 32 bits and re-extract the lower XLEN bits. SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, + DAG.getUNDEF(ContainerVT), DAG.getConstant(32, DL, XLenVT), VL); SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, ThirtyTwoV, Mask, VL); @@ -6920,8 +6946,9 @@ SDValue VL = DAG.getConstant(1, DL, XLenVT); MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); - SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, - DAG.getConstant(32, DL, XLenVT), VL); + SDValue ThirtyTwoV = + DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), + DAG.getConstant(32, DL, XLenVT), VL); SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL); SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); @@ -7729,8 +7756,8 @@ // Look for splats on the left hand side of a vwadd(u).wv. We might be able // to commute and use a vwadd(u).vx instead. if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL && - Op0.getOperand(1) == VL) { - Op0 = Op0.getOperand(0); + Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) { + Op0 = Op0.getOperand(1); // See if have enough sign bits or zero bits in the scalar to use a // widening add/sub by splatting to smaller element size. 
@@ -7750,7 +7777,8 @@
       return SDValue();
     }
 
-    Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op0, VL);
+    Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
+                      DAG.getUNDEF(NarrowVT), Op0, VL);
 
     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
   }
@@ -7802,12 +7830,15 @@
   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
     // The operand is a splat of a scalar.
 
+    // The passthru must be undef for tail agnostic.
+    if (!Op1.getOperand(0).isUndef())
+      return SDValue();
     // The VL must be the same.
-    if (Op1.getOperand(1) != VL)
+    if (Op1.getOperand(2) != VL)
       return SDValue();
 
     // Get the scalar value.
-    Op1 = Op1.getOperand(0);
+    Op1 = Op1.getOperand(1);
 
     // See if have enough sign bits or zero bits in the scalar to use a
     // widening multiply by splatting to smaller element size.
@@ -7827,7 +7858,8 @@
       return SDValue();
     }
 
-    Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
+    Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
+                      DAG.getUNDEF(NarrowVT), Op1, VL);
   } else
     return SDValue();
 
@@ -8399,8 +8431,8 @@
       SDLoc DL(N);
       SDValue VL = N->getOperand(3);
       EVT VT = N->getValueType(0);
-      ShAmt =
-          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
+      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
+                          ShAmt.getOperand(1), VL);
       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                          N->getOperand(2), N->getOperand(3));
     }
@@ -8414,7 +8446,8 @@
       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
       SDLoc DL(N);
       EVT VT = N->getValueType(0);
-      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0),
+      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
+                          ShAmt.getOperand(1),
                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
     }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -859,6 +859,21 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoUnaryNoDummyMaskTU<VReg RetClass,
+                                DAGOperand Op2Class> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$dest, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoNullaryNoMask<VReg RegClass>:
       Pseudo<(outs RegClass:$rd),
              (ins AVL:$vl, ixlenimm:$sew),
@@ -2000,6 +2015,12 @@
                          Sched<[WriteVIMovX, ReadVIMovX]>;
       def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>,
                          Sched<[WriteVIMovI]>;
+      def "_V_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, m.vrclass>,
+                                Sched<[WriteVIMovV, ReadVIMovV]>;
+      def "_X_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, GPR>,
+                                Sched<[WriteVIMovX, ReadVIMovX]>;
+      def "_I_" # m.MX # "_TU": VPseudoUnaryNoDummyMaskTU<m.vrclass, simm5>,
+                                Sched<[WriteVIMovI]>;
     }
   }
 }
@@ -2011,6 +2032,9 @@
       def "_" # f.FX # "_" # m.MX :
        VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>,
        Sched<[WriteVFMovV, ReadVFMovF]>;
+      def "_" # f.FX # "_" # m.MX # "_TU":
+       VPseudoUnaryNoDummyMaskTU<m.vrclass, f.fprclass>,
+       Sched<[WriteVFMovV, ReadVFMovF]>;
     }
   }
 }
@@ -5071,10 +5095,16 @@
 // 12.16.
Vector Integer Move Instructions //===----------------------------------------------------------------------===// foreach vti = AllVectors in { - def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), VLOpFrag)), (!cast("PseudoVMV_V_V_"#vti.LMul.MX) $rs1, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru), + (vti.Vector vti.RegClass:$rs1), + VLOpFrag)), + (!cast("PseudoVMV_V_V_"#vti.LMul.MX#"_TU") + $passthru, $rs1, GPR:$vl, vti.Log2SEW)>; // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -584,7 +584,7 @@ foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. def : Pat<(shl (vti.Vector vti.RegClass:$rs1), - (vti.Vector (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)))), + (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVADD_VV_"# vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -47,13 +47,15 @@ SDTCisVT<4, XLenVT>]>; def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL", - SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<0>, - SDTCisVT<1, XLenVT>, - SDTCisVT<2, XLenVT>]>>; + SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, + SDTCisSameAs<0, 1>, + SDTCisVT<2, XLenVT>, + SDTCisVT<3, XLenVT>]>>; def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL", - SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisFP<0>, - SDTCisEltOfVec<1, 0>, - SDTCisVT<2, XLenVT>]>>; + SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>, + SDTCisSameAs<0, 1>, + SDTCisEltOfVec<2, 0>, + SDTCisVT<3, XLenVT>]>>; def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL", SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, @@ -295,7 +297,7 @@ // Ignore the vl operand. def SplatFPOp : PatFrag<(ops node:$op), - (riscv_vfmv_v_f_vl node:$op, srcvalue)>; + (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>; def sew8simm5 : ComplexPattern", []>; def sew16simm5 : ComplexPattern", []>; @@ -765,7 +767,7 @@ foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), - (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)), + (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), (vti.Mask true_mask), VLOpFrag), (!cast("PseudoVADD_VV_"# vti.LMul.MX) @@ -1045,14 +1047,21 @@ // 12.16. 
Vector Integer Move Instructions foreach vti = AllIntegerVectors in { - def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, VLOpFrag)), + def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), GPR:$rs2, VLOpFrag)), (!cast("PseudoVMV_V_X_"#vti.LMul.MX) $rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, GPR:$rs2, VLOpFrag)), + (!cast("PseudoVMV_V_X_"#vti.LMul.MX#"_TU") + $passthru, $rs2, GPR:$vl, vti.Log2SEW)>; defvar ImmPat = !cast("sew"#vti.SEW#"simm5"); - def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5), + def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), (ImmPat XLenVT:$imm5), VLOpFrag)), (!cast("PseudoVMV_V_I_"#vti.LMul.MX) XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, (ImmPat XLenVT:$imm5), + VLOpFrag)), + (!cast("PseudoVMV_V_I_"#vti.LMul.MX#"_TU") + $passthru, XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>; } // 12.1. Vector Single-Width Saturating Add and Subtract @@ -1336,16 +1345,26 @@ // 14.16. Vector Floating-Point Move Instruction // If we're splatting fpimm0, use vmv.v.x vd, x0. def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl - (fvti.Scalar (fpimm0)), VLOpFrag)), + (fvti.Vector undef), (fvti.Scalar (fpimm0)), VLOpFrag)), (!cast("PseudoVMV_V_I_"#fvti.LMul.MX) 0, GPR:$vl, fvti.Log2SEW)>; + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), + (!cast("PseudoVMV_V_I_"#fvti.LMul.MX#"_TU") + $passthru, 0, GPR:$vl, fvti.Log2SEW)>; def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl - (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), + (fvti.Vector undef), (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), (!cast("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # fvti.LMul.MX) (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl, fvti.Log2SEW)>; + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), + (!cast("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" # + fvti.LMul.MX # "_TU") + $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), + GPR:$vl, fvti.Log2SEW)>; // 14.17. 
Vector Single-Width Floating-Point/Integer Type-Convert Instructions defm : VPatConvertFP2ISDNode_V_VL; diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll @@ -395,7 +395,7 @@ ret %vc } -declare @llvm.riscv.vmv.v.x.nxv8i8(i8, i32); +declare @llvm.riscv.vmv.v.x.nxv8i8(, i8, i32); ; Test that we don't optimize ult x, 0 -> ule x, -1 define @icmp_ult_vi_nxv8i8_5( %va, i32 %vl) { @@ -404,7 +404,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, zero ; CHECK-NEXT: ret - %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl) + %splat = call @llvm.riscv.vmv.v.x.nxv8i8( undef, i8 0, i32 %vl) %vc = icmp ult %va, %splat ret %vc } @@ -1038,7 +1038,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret - %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl) + %splat = call @llvm.riscv.vmv.v.x.nxv8i8( undef, i8 0, i32 %vl) %vc = icmp uge %va, %splat ret %vc } diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll @@ -296,7 +296,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret - %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl) + %splat = call @llvm.riscv.vmv.v.x.nxv8i8( undef, i8 0, i64 %vl) %vc = icmp uge %va, %splat ret %vc } @@ -409,7 +409,7 @@ ret %vc } -declare @llvm.riscv.vmv.v.x.nxv8i8(i8, i64); +declare @llvm.riscv.vmv.v.x.nxv8i8(, i8, i64); ; Test that we don't optimize ult x, 0 -> ule x, -1 define @icmp_ult_vi_nxv8i8_5( %va, i64 %vl) { @@ -418,7 +418,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, zero ; CHECK-NEXT: ret - %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl) + %splat = call @llvm.riscv.vmv.v.x.nxv8i8( undef, i8 0, i64 %vl) %vc = icmp ult %va, %splat ret %vc } diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -3224,3 +3224,112 @@ ret %a } + +declare @llvm.riscv.vmv.v.v.nxv1i8( + , + , + iXLen); + +define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.v.v.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmv.v.v.nxv1f32( + , + , + iXLen); + +define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; RV32-NEXT: vmv.v.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; RV64-NEXT: vmv.v.v v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.v.v.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmv.v.x.nxv1i64( + , + i64, 
+ iXLen); + +define @intrinsic_vmv.v.x_x_nxv1i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v8, (a0), zero +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmv.v.x_x_nxv1i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.v.x.nxv1i64( + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv1f32( + , + float, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; RV32-NEXT: vfmv.v.f v8, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; RV64-NEXT: vfmv.v.f v8, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv1f32( + %0, + float %1, + iXLen %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmv.v.f.nxv1f16( + , half, iXLen); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( + undef, half %0, iXLen %1) @@ -22,6 +24,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv2f16( + , half, iXLen); @@ -33,6 +36,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( + undef, half %0, iXLen %1) @@ -40,6 +44,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv4f16( + , half, iXLen); @@ -51,6 +56,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( + undef, half %0, iXLen %1) @@ -58,6 +64,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv8f16( + , half, iXLen); @@ -69,6 +76,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( + undef, half %0, iXLen %1) @@ -76,6 +84,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv16f16( + , half, iXLen); @@ -87,6 +96,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( + undef, half %0, iXLen %1) @@ -94,6 +104,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv32f16( + , half, iXLen); @@ -105,6 +116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv32f16( + undef, half %0, iXLen %1) @@ -112,6 +124,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv1f32( + , float, iXLen); @@ -123,6 +136,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( + undef, float %0, iXLen %1) @@ -130,6 +144,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv2f32( + , float, iXLen); @@ -141,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( + undef, float %0, iXLen %1) @@ -148,6 +164,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv4f32( + , float, iXLen); @@ -159,6 +176,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( + undef, float %0, iXLen %1) @@ -166,6 +184,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv8f32( + , float, iXLen); @@ -177,6 +196,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( + undef, float %0, iXLen %1) @@ -184,6 +204,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv16f32( + , float, iXLen); @@ -195,6 +216,7 
@@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( + undef, float %0, iXLen %1) @@ -202,6 +224,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv1f64( + , double, iXLen); @@ -213,6 +236,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( + undef, double %0, iXLen %1) @@ -220,6 +244,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv2f64( + , double, iXLen); @@ -231,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( + undef, double %0, iXLen %1) @@ -238,6 +264,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv4f64( + , double, iXLen); @@ -249,6 +276,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( + undef, double %0, iXLen %1) @@ -256,6 +284,7 @@ } declare @llvm.riscv.vfmv.v.f.nxv8f64( + , double, iXLen); @@ -267,6 +296,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( + undef, double %0, iXLen %1) @@ -281,6 +311,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f16( + undef, half 0.0, iXLen %0) @@ -295,6 +326,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f16( + undef, half 0.0, iXLen %0) @@ -309,6 +341,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f16( + undef, half 0.0, iXLen %0) @@ -323,6 +356,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f16( + undef, half 0.0, iXLen %0) @@ -337,6 +371,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f16( + undef, half 0.0, iXLen %0) @@ -351,6 +386,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv32f16( + undef, half 0.0, iXLen %0) @@ -365,6 +401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f32( + undef, float 0.0, iXLen %0) @@ -379,6 +416,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f32( + undef, float 0.0, iXLen %0) @@ -393,6 +431,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f32( + undef, float 0.0, iXLen %0) @@ -407,6 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f32( + undef, float 0.0, iXLen %0) @@ -421,6 +461,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv16f32( + undef, float 0.0, iXLen %0) @@ -435,6 +476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1f64( + undef, double 0.0, iXLen %0) @@ -449,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2f64( + undef, double 0.0, iXLen %0) @@ -463,6 +506,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4f64( + undef, double 0.0, iXLen %0) @@ -477,6 +521,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8f64( + undef, double 0.0, iXLen %0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmv.v.v.nxv1i8( + , , i32); @@ -13,6 +14,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i8( + undef, %0, i32 %1) @@ -20,6 +22,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i8( + , , i32); @@ -31,6 +34,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i8( + undef, %0, i32 %1) @@ -38,6 +42,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i8( + , , i32); @@ -49,6 +54,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i8( + undef, %0, i32 %1) @@ -56,6 +62,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i8( + , , i32); @@ -67,6 +74,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmv.v.v.nxv8i8( + undef, %0, i32 %1) @@ -74,6 +82,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i8( + , , i32); @@ -85,6 +94,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i8( + undef, %0, i32 %1) @@ -92,6 +102,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32i8( + , , i32); @@ -103,6 +114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32i8( + undef, %0, i32 %1) @@ -110,6 +122,7 @@ } declare @llvm.riscv.vmv.v.v.nxv64i8( + , , i32); @@ -121,6 +134,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv64i8( + undef, %0, i32 %1) @@ -128,6 +142,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i16( + , , i32); @@ -139,6 +154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i16( + undef, %0, i32 %1) @@ -146,6 +162,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i16( + , , i32); @@ -157,6 +174,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i16( + undef, %0, i32 %1) @@ -164,6 +182,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i16( + , , i32); @@ -175,6 +194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i16( + undef, %0, i32 %1) @@ -182,6 +202,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i16( + , , i32); @@ -193,6 +214,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i16( + undef, %0, i32 %1) @@ -200,6 +222,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i16( + , , i32); @@ -211,6 +234,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i16( + undef, %0, i32 %1) @@ -218,6 +242,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32i16( + , , i32); @@ -229,6 +254,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32i16( + undef, %0, i32 %1) @@ -236,6 +262,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i32( + , , i32); @@ -247,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i32( + undef, %0, i32 %1) @@ -254,6 +282,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i32( + , , i32); @@ -265,6 +294,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i32( + undef, %0, i32 %1) @@ -272,6 +302,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i32( + , , i32); @@ -283,6 +314,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i32( + undef, %0, i32 %1) @@ -290,6 +322,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i32( + , , i32); @@ -301,6 +334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i32( + undef, %0, i32 %1) @@ -308,6 +342,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i32( + , , i32); @@ -319,6 +354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i32( + undef, %0, i32 %1) @@ -326,6 +362,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i64( + , , i32); @@ -337,6 +374,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i64( + undef, %0, i32 %1) @@ -344,6 +382,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i64( + , , i32); @@ -355,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i64( + undef, %0, i32 %1) @@ -362,6 +402,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i64( + , , i32); @@ -373,6 +414,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i64( + undef, %0, i32 %1) @@ -380,6 +422,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i64( + , , i32); @@ -391,6 +434,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i64( + undef, %0, i32 %1) @@ -398,6 +442,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f16( + , , i32); @@ -409,6 +454,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f16( + undef, %0, i32 %1) @@ -416,6 +462,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f16( + , , i32); @@ -427,6 +474,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f16( + 
undef, %0, i32 %1) @@ -434,6 +482,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f16( + , , i32); @@ -445,6 +494,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f16( + undef, %0, i32 %1) @@ -452,6 +502,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f16( + , , i32); @@ -463,6 +514,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8f16( + undef, %0, i32 %1) @@ -470,6 +522,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16f16( + , , i32); @@ -481,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16f16( + undef, %0, i32 %1) @@ -488,6 +542,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32f16( + , , i32); @@ -499,6 +554,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32f16( + undef, %0, i32 %1) @@ -506,6 +562,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f32( + , , i32); @@ -517,6 +574,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f32( + undef, %0, i32 %1) @@ -524,6 +582,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f32( + , , i32); @@ -535,6 +594,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f32( + undef, %0, i32 %1) @@ -542,6 +602,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f32( + , , i32); @@ -553,6 +614,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f32( + undef, %0, i32 %1) @@ -560,6 +622,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f32( + , , i32); @@ -571,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8f32( + undef, %0, i32 %1) @@ -578,6 +642,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16f32( + , , i32); @@ -589,6 +654,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16f32( + undef, %0, i32 %1) @@ -596,6 +662,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f64( + , , i32); @@ -607,6 +674,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f64( + undef, %0, i32 %1) @@ -614,6 +682,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f64( + , , i32); @@ -625,6 +694,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f64( + undef, %0, i32 %1) @@ -632,6 +702,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f64( + , , i32); @@ -643,6 +714,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f64( + undef, %0, i32 %1) @@ -650,6 +722,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f64( + , , i32); @@ -661,6 +734,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8f64( + undef, %0, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmv.v.v.nxv1i8( + , , i64); @@ -13,6 +14,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i8( + undef, %0, i64 %1) @@ -20,6 +22,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i8( + , , i64); @@ -31,6 +34,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i8( + undef, %0, i64 %1) @@ -38,6 +42,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i8( + , , i64); @@ -49,6 +54,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i8( + undef, %0, i64 %1) @@ -56,6 +62,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i8( + , , i64); @@ -67,6 +74,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i8( + undef, %0, i64 %1) @@ -74,6 +82,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i8( + , , i64); @@ -85,6 +94,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i8( + undef, %0, i64 %1) @@ -92,6 +102,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32i8( + , , i64); @@ -103,6 +114,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32i8( + undef, %0, i64 %1) @@ -110,6 +122,7 @@ } declare @llvm.riscv.vmv.v.v.nxv64i8( + , , i64); @@ -121,6 +134,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv64i8( + undef, %0, i64 %1) @@ -128,6 +142,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i16( + , , i64); @@ -139,6 +154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i16( + undef, %0, i64 %1) @@ -146,6 +162,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i16( + , , i64); @@ -157,6 +174,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i16( + undef, %0, i64 %1) @@ -164,6 +182,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i16( + , , i64); @@ -175,6 +194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i16( + undef, %0, i64 %1) @@ -182,6 +202,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i16( + , , i64); @@ -193,6 +214,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i16( + undef, %0, i64 %1) @@ -200,6 +222,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i16( + , , i64); @@ -211,6 +234,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i16( + undef, %0, i64 %1) @@ -218,6 +242,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32i16( + , , i64); @@ -229,6 +254,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32i16( + undef, %0, i64 %1) @@ -236,6 +262,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i32( + , , i64); @@ -247,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i32( + undef, %0, i64 %1) @@ -254,6 +282,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i32( + , , i64); @@ -265,6 +294,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i32( + undef, %0, i64 %1) @@ -272,6 +302,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i32( + , , i64); @@ -283,6 +314,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i32( + undef, %0, i64 %1) @@ -290,6 +322,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i32( + , , i64); @@ -301,6 +334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i32( + undef, %0, i64 %1) @@ -308,6 +342,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16i32( + , , i64); @@ -319,6 +354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16i32( + undef, %0, i64 %1) @@ -326,6 +362,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1i64( + , , i64); @@ -337,6 +374,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1i64( + undef, %0, i64 %1) @@ -344,6 +382,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2i64( + , , i64); @@ -355,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2i64( + undef, %0, i64 %1) @@ -362,6 +402,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4i64( + , , i64); @@ -373,6 +414,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4i64( + undef, %0, i64 %1) @@ -380,6 +422,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8i64( + , , i64); @@ -391,6 +434,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8i64( + undef, %0, i64 %1) @@ -398,6 +442,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f16( + , , i64); @@ -409,6 +454,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f16( + undef, %0, i64 %1) @@ -416,6 +462,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f16( + , , i64); @@ -427,6 +474,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f16( + undef, %0, i64 %1) @@ -434,6 +482,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f16( + , , i64); @@ -445,6 +494,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f16( + undef, %0, i64 %1) @@ -452,6 +502,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f16( + , , i64); @@ -463,6 +514,7 @@ ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vmv.v.v.nxv8f16( + undef, %0, i64 %1) @@ -470,6 +522,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16f16( + , , i64); @@ -481,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16f16( + undef, %0, i64 %1) @@ -488,6 +542,7 @@ } declare @llvm.riscv.vmv.v.v.nxv32f16( + , , i64); @@ -499,6 +554,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv32f16( + undef, %0, i64 %1) @@ -506,6 +562,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f32( + , , i64); @@ -517,6 +574,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f32( + undef, %0, i64 %1) @@ -524,6 +582,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f32( + , , i64); @@ -535,6 +594,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f32( + undef, %0, i64 %1) @@ -542,6 +602,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f32( + , , i64); @@ -553,6 +614,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f32( + undef, %0, i64 %1) @@ -560,6 +622,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f32( + , , i64); @@ -571,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8f32( + undef, %0, i64 %1) @@ -578,6 +642,7 @@ } declare @llvm.riscv.vmv.v.v.nxv16f32( + , , i64); @@ -589,6 +654,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv16f32( + undef, %0, i64 %1) @@ -596,6 +662,7 @@ } declare @llvm.riscv.vmv.v.v.nxv1f64( + , , i64); @@ -607,6 +674,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv1f64( + undef, %0, i64 %1) @@ -614,6 +682,7 @@ } declare @llvm.riscv.vmv.v.v.nxv2f64( + , , i64); @@ -625,6 +694,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv2f64( + undef, %0, i64 %1) @@ -632,6 +702,7 @@ } declare @llvm.riscv.vmv.v.v.nxv4f64( + , , i64); @@ -643,6 +714,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv4f64( + undef, %0, i64 %1) @@ -650,6 +722,7 @@ } declare @llvm.riscv.vmv.v.v.nxv8f64( + , , i64); @@ -661,6 +734,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.v.nxv8f64( + undef, %0, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmv.v.x.nxv1i8( + , i8, i32); @@ -13,6 +14,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i8( + undef, i8 %0, i32 %1) @@ -20,6 +22,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i8( + , i8, i32); @@ -31,6 +34,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i8( + undef, i8 %0, i32 %1) @@ -38,6 +42,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i8( + , i8, i32); @@ -49,6 +54,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i8( + undef, i8 %0, i32 %1) @@ -56,6 +62,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i8( + , i8, i32); @@ -67,6 +74,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i8( + undef, i8 %0, i32 %1) @@ -74,6 +82,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i8( + , i8, i32); @@ -85,6 +94,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i8( + undef, i8 %0, i32 %1) @@ -92,6 +102,7 @@ } declare @llvm.riscv.vmv.v.x.nxv32i8( + , i8, i32); @@ -103,6 +114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i8( + undef, i8 %0, i32 %1) @@ -110,6 +122,7 @@ } declare @llvm.riscv.vmv.v.x.nxv64i8( + , i8, i32); @@ -121,6 +134,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv64i8( + undef, i8 %0, i32 %1) @@ -128,6 +142,7 @@ } declare 
@llvm.riscv.vmv.v.x.nxv1i16( + , i16, i32); @@ -139,6 +154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i16( + undef, i16 %0, i32 %1) @@ -146,6 +162,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i16( + , i16, i32); @@ -157,6 +174,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i16( + undef, i16 %0, i32 %1) @@ -164,6 +182,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i16( + , i16, i32); @@ -175,6 +194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i16( + undef, i16 %0, i32 %1) @@ -182,6 +202,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i16( + , i16, i32); @@ -193,6 +214,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i16( + undef, i16 %0, i32 %1) @@ -200,6 +222,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i16( + , i16, i32); @@ -211,6 +234,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i16( + undef, i16 %0, i32 %1) @@ -218,6 +242,7 @@ } declare @llvm.riscv.vmv.v.x.nxv32i16( + , i16, i32); @@ -229,6 +254,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i16( + undef, i16 %0, i32 %1) @@ -236,6 +262,7 @@ } declare @llvm.riscv.vmv.v.x.nxv1i32( + , i32, i32); @@ -247,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i32( + undef, i32 %0, i32 %1) @@ -254,6 +282,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i32( + , i32, i32); @@ -265,6 +294,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i32( + undef, i32 %0, i32 %1) @@ -272,6 +302,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i32( + , i32, i32); @@ -283,6 +314,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i32( + undef, i32 %0, i32 %1) @@ -290,6 +322,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i32( + , i32, i32); @@ -301,6 +334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i32( + undef, i32 %0, i32 %1) @@ -308,6 +342,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i32( + , i32, i32); @@ -319,6 +354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i32( + undef, i32 %0, i32 %1) @@ -326,6 +362,7 @@ } declare @llvm.riscv.vmv.v.x.nxv1i64( + , i64, i32); @@ -342,6 +379,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i64( + undef, i64 %0, i32 %1) @@ -349,6 +387,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i64( + , i64, i32); @@ -365,6 +404,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i64( + undef, i64 %0, i32 %1) @@ -372,6 +412,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i64( + , i64, i32); @@ -388,6 +429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i64( + undef, i64 %0, i32 %1) @@ -395,6 +437,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i64( + , i64, i32); @@ -411,6 +454,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i64( + undef, i64 %0, i32 %1) @@ -425,6 +469,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i8( + undef, i8 9, i32 %0) @@ -439,6 +484,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i8( + undef, i8 9, i32 %0) @@ -453,6 +499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i8( + undef, i8 9, i32 %0) @@ -467,6 +514,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i8( + undef, i8 9, i32 %0) @@ -481,6 +529,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i8( + undef, i8 9, i32 %0) @@ -495,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i8( + undef, i8 9, i32 %0) @@ -509,6 +559,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv64i8( + undef, i8 9, i32 %0) @@ -523,6 +574,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i16( + 
undef, i16 9, i32 %0) @@ -537,6 +589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i16( + undef, i16 9, i32 %0) @@ -551,6 +604,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i16( + undef, i16 9, i32 %0) @@ -565,6 +619,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i16( + undef, i16 9, i32 %0) @@ -579,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i16( + undef, i16 9, i32 %0) @@ -593,6 +649,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i16( + undef, i16 9, i32 %0) @@ -607,6 +664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i32( + undef, i32 9, i32 %0) @@ -621,6 +679,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i32( + undef, i32 9, i32 %0) @@ -635,6 +694,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i32( + undef, i32 9, i32 %0) @@ -649,6 +709,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i32( + undef, i32 9, i32 %0) @@ -663,6 +724,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i32( + undef, i32 9, i32 %0) @@ -677,6 +739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i64( + undef, i64 9, i32 %0) @@ -691,6 +754,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i64( + undef, i64 9, i32 %0) @@ -705,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i64( + undef, i64 9, i32 %0) @@ -719,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i64( + undef, i64 9, i32 %0) @@ -733,6 +799,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i64( + undef, i64 12884901891, i32 -1) @@ -747,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i64( + undef, i64 12884901891, i32 -1) @@ -761,6 +829,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i64( + undef, i64 12884901891, i32 -1) @@ -775,6 +844,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i64( + undef, i64 12884901891, i32 -1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmv.v.x.nxv1i8( + , i8, i64); @@ -13,6 +14,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i8( + undef, i8 %0, i64 %1) @@ -20,6 +22,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i8( + , i8, i64); @@ -31,6 +34,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i8( + undef, i8 %0, i64 %1) @@ -38,6 +42,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i8( + , i8, i64); @@ -49,6 +54,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i8( + undef, i8 %0, i64 %1) @@ -56,6 +62,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i8( + , i8, i64); @@ -67,6 +74,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i8( + undef, i8 %0, i64 %1) @@ -74,6 +82,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i8( + , i8, i64); @@ -85,6 +94,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i8( + undef, i8 %0, i64 %1) @@ -92,6 +102,7 @@ } declare @llvm.riscv.vmv.v.x.nxv32i8( + , i8, i64); @@ -103,6 +114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i8( + undef, i8 %0, i64 %1) @@ -110,6 +122,7 @@ } declare @llvm.riscv.vmv.v.x.nxv64i8( + , i8, i64); @@ -121,6 +134,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv64i8( + undef, i8 %0, i64 %1) @@ -128,6 +142,7 @@ 
} declare @llvm.riscv.vmv.v.x.nxv1i16( + , i16, i64); @@ -139,6 +154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i16( + undef, i16 %0, i64 %1) @@ -146,6 +162,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i16( + , i16, i64); @@ -157,6 +174,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i16( + undef, i16 %0, i64 %1) @@ -164,6 +182,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i16( + , i16, i64); @@ -175,6 +194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i16( + undef, i16 %0, i64 %1) @@ -182,6 +202,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i16( + , i16, i64); @@ -193,6 +214,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i16( + undef, i16 %0, i64 %1) @@ -200,6 +222,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i16( + , i16, i64); @@ -211,6 +234,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i16( + undef, i16 %0, i64 %1) @@ -218,6 +242,7 @@ } declare @llvm.riscv.vmv.v.x.nxv32i16( + , i16, i64); @@ -229,6 +254,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i16( + undef, i16 %0, i64 %1) @@ -236,6 +262,7 @@ } declare @llvm.riscv.vmv.v.x.nxv1i32( + , i32, i64); @@ -247,6 +274,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i32( + undef, i32 %0, i64 %1) @@ -254,6 +282,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i32( + , i32, i64); @@ -265,6 +294,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i32( + undef, i32 %0, i64 %1) @@ -272,6 +302,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i32( + , i32, i64); @@ -283,6 +314,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i32( + undef, i32 %0, i64 %1) @@ -290,6 +322,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i32( + , i32, i64); @@ -301,6 +334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i32( + undef, i32 %0, i64 %1) @@ -308,6 +342,7 @@ } declare @llvm.riscv.vmv.v.x.nxv16i32( + , i32, i64); @@ -319,6 +354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i32( + undef, i32 %0, i64 %1) @@ -326,6 +362,7 @@ } declare @llvm.riscv.vmv.v.x.nxv1i64( + , i64, i64); @@ -337,6 +374,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i64( + undef, i64 %0, i64 %1) @@ -344,6 +382,7 @@ } declare @llvm.riscv.vmv.v.x.nxv2i64( + , i64, i64); @@ -355,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i64( + undef, i64 %0, i64 %1) @@ -362,6 +402,7 @@ } declare @llvm.riscv.vmv.v.x.nxv4i64( + , i64, i64); @@ -373,6 +414,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i64( + undef, i64 %0, i64 %1) @@ -380,6 +422,7 @@ } declare @llvm.riscv.vmv.v.x.nxv8i64( + , i64, i64); @@ -391,6 +434,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i64( + undef, i64 %0, i64 %1) @@ -405,6 +449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv1i8( + undef, i8 9, i64 %0) @@ -419,6 +464,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv2i8( + undef, i8 9, i64 %0) @@ -433,6 +479,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv4i8( + undef, i8 9, i64 %0) @@ -447,6 +494,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv8i8( + undef, i8 9, i64 %0) @@ -461,6 +509,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv16i8( + undef, i8 9, i64 %0) @@ -475,6 +524,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv32i8( + undef, i8 9, i64 %0) @@ -489,6 +539,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmv.v.x.nxv64i8( + undef, i8 9, i64 %0) @@ -503,6 +554,7 @@ ; CHECK-NEXT: ret entry: %a = call 
<vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
+    <vscale x 1 x i16> undef,
     i16 9,
     i64 %0)
@@ -517,6 +569,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
+    <vscale x 2 x i16> undef,
     i16 9,
     i64 %0)
@@ -531,6 +584,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
+    <vscale x 4 x i16> undef,
     i16 9,
     i64 %0)
@@ -545,6 +599,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
+    <vscale x 8 x i16> undef,
     i16 9,
     i64 %0)
@@ -559,6 +614,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
+    <vscale x 16 x i16> undef,
     i16 9,
     i64 %0)
@@ -573,6 +629,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
+    <vscale x 32 x i16> undef,
     i16 9,
     i64 %0)
@@ -587,6 +644,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
+    <vscale x 1 x i32> undef,
     i32 9,
     i64 %0)
@@ -601,6 +659,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
+    <vscale x 2 x i32> undef,
     i32 9,
     i64 %0)
@@ -615,6 +674,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
+    <vscale x 4 x i32> undef,
     i32 9,
     i64 %0)
@@ -629,6 +689,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
+    <vscale x 8 x i32> undef,
     i32 9,
     i64 %0)
@@ -643,6 +704,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
+    <vscale x 16 x i32> undef,
     i32 9,
     i64 %0)
@@ -657,6 +719,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
+    <vscale x 1 x i64> undef,
     i64 9,
     i64 %0)
@@ -671,6 +734,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
+    <vscale x 2 x i64> undef,
     i64 9,
     i64 %0)
@@ -685,6 +749,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
+    <vscale x 4 x i64> undef,
     i64 9,
     i64 %0)
@@ -699,6 +764,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
+    <vscale x 8 x i64> undef,
     i64 9,
     i64 %0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -14,8 +14,8 @@
 declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double, i64)
-declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double>, double, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float>, float, i64)
 declare void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
 declare void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>* nocapture, i64)
@@ -156,16 +156,16 @@
   br i1 %tobool, label %if.else, label %if.then

 if.then:                                          ; preds = %entry
-  %0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %avl)
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %avl)
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %avl)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %avl)
   %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %avl)
   %3 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double>* %3, i64 %avl)
   br label %if.end

 if.else:                                          ; preds = %entry
-  %4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %avl)
-  %5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %avl)
+  %4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %avl)
+  %5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %avl)
   %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 %avl)
   %7 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, <vscale x 2 x float>* %7, i64 %avl)
@@ -299,8 +299,8 @@
 if.then4:                                         ; preds = %if.end
   %3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
-  %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %3)
-  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %3)
+  %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %3)
+  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %3)
   %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 %3)
   %7 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, <vscale x 1 x double>* %7, i64 %3)
@@ -308,8 +308,8 @@
 if.else5:                                         ; preds = %if.end
   %8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
-  %9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %8)
-  %10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %8)
+  %9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %8)
+  %10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %8)
   %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 %8)
   %12 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, <vscale x 2 x float>* %12, i64 %8)
diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
--- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
@@ -7,9 +7,9 @@
 declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
 declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(half, i64);
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(float, i64);
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(double, i64);
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(<vscale x 1 x half>, half, i64);
+declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(<vscale x 1 x float>, float, i64);
+declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(<vscale x 1 x double>, double, i64);

 define <vscale x 1 x half> @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
@@ -28,7 +28,7 @@
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
-  %b = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(half %a, i64 %1)
+  %b = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(<vscale x 1 x half> undef, half %a, i64 %1)
   ret <vscale x 1 x half> %b
 }
@@ -49,7 +49,7 @@
 entry:
   %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
-  %b = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(float %a, i64 %1)
+  %b = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(<vscale x 1 x float> undef, float %a, i64 %1)
   ret <vscale x 1 x float> %b
 }
@@ -70,6 +70,6 @@
 entry:
   %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
-  %b = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(double %a, i64 %1)
+  %b = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(<vscale x 1 x double> undef, double %a, i64 %1)
   ret <vscale x 1 x double> %b
 }