diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -443,7 +443,8 @@
 let HasMaskedOffOperand = false;
 }
-let HasMaskedOffOperand = false in {
+let UnMaskedPolicy = HasPolicyOperand,
+    HasMaskedOffOperand = false in {
 multiclass RVVSlideBuiltinSet {
   defm "" : RVVOutBuiltinSet;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
@@ -17,7 +17,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
@@ -27,7 +27,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
@@ -37,7 +37,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
@@ -47,7 +47,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
@@ -57,7 +57,7 @@
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t
test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src, @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src, @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src, @@ -127,7 +127,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src, @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src, @@ -157,7 +157,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src, @@ -167,7 +167,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src, @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src, @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src, @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src, @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -247,7 +247,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -257,7 +257,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -357,7 +357,7 @@ // 
CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -397,7 +397,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -407,7 +407,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -417,7 +417,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -437,7 +437,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -487,7 +487,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -497,7 +497,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -507,7 +507,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -517,7 +517,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -527,7 +527,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset, @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset, @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset, @@ -127,7 +127,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset, @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset, @@ -157,7 +157,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset, @@ -167,7 +167,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset, @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset, @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset, @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset, @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -247,7 +247,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -257,7 +257,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset, @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset, @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset, @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset, @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -397,7 +397,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -407,7 +407,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -417,7 +417,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -437,7 +437,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -487,7 +487,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -497,7 +497,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -507,7 +507,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -517,7 +517,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -527,7 +527,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, @@ -18,7 +18,7 @@ // 
CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src, @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src, @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src, @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src, @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src, @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src, @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src, @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src, @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src, @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -508,7 +508,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, 
vfloat64m4_t src, @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset, @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset, @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset, @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset, @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset, @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset, @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset, @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset, @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset, @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset, @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset, @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset, @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset, @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -508,7 +508,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -672,16 +672,16 @@ [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let VLOperand = 4; } - // Input: (vector_in, vector_in, vector_in/scalar_in, vl) - class RISCVTernaryAAAXUnMasked + // Input: (vector_in, vector_in, scalar_in, vl, policy) + class RVVSlideUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, - LLVMMatchType<1>], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMMatchType<1>, LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 3; } // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVTernaryAAAXMasked + class RVVSlideMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -1106,9 +1106,9 @@ def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked; def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked; } - multiclass RISCVTernaryAAAX { - def "int_riscv_" # NAME : RISCVTernaryAAAXUnMasked; - def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMasked; + multiclass RVVSlide { + def "int_riscv_" # NAME : RVVSlideUnMasked; + def "int_riscv_" # NAME # "_mask" : RVVSlideMasked; } multiclass RISCVTernaryAAXA { def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked; @@ -1355,8 +1355,8 @@ defm vfmerge : RISCVBinaryWithV0; - defm vslideup : RISCVTernaryAAAX; - defm vslidedown : RISCVTernaryAAAX; + defm vslideup : RVVSlide; + defm vslidedown : RVVSlide; defm vslide1up : RISCVBinaryAAX; defm vslide1down : RISCVBinaryAAX; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2509,9 +2509,9 @@ } } -multiclass VPseudoTernaryV_VX { +multiclass VPseudoVSLDV_VX { foreach m = MxList in - defm _VX : VPseudoTernaryNoMaskNoPolicy; + defm _VX : VPseudoTernaryWithPolicy; } multiclass VPseudoTernaryV_VX_AAXA { @@ -2550,9 +2550,9 @@ m.vrclass, m, constraint>; } -multiclass VPseudoTernaryV_VI { +multiclass VPseudoVSLDV_VI { foreach m = MxList in - defm _VI : VPseudoTernaryNoMaskNoPolicy; + defm _VI : VPseudoTernaryWithPolicy; } multiclass VPseudoVMAC_VV_VX_AAXA { @@ -2570,9 +2570,9 @@ } multiclass VPseudoVSLD_VX_VI { - defm "" : VPseudoTernaryV_VX, + defm "" : VPseudoVSLDV_VX, Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideV, ReadVISlideX, ReadVMask]>; - defm "" : VPseudoTernaryV_VI, + defm "" : VPseudoVSLDV_VI, Sched<[WriteVISlideI, ReadVISlideV, ReadVISlideV, ReadVMask]>; } @@ -3970,10 +3970,10 @@ 
multiclass VPatTernaryV_VX vtilist> { foreach vti = vtilist in - defm : VPatTernaryNoMaskNoPolicy; + defm : VPatTernaryWithPolicy; } multiclass VPatTernaryV_VX_AAXA vtilist, Operand Imm_type> { foreach vti = vtilist in - defm : VPatTernaryNoMaskNoPolicy; + defm : VPatTernaryWithPolicy; } multiclass VPatTernaryW_VV, VPatTernaryV_VI; + multiclass VPatBinaryM_VV_VX_VI vtilist> : VPatBinaryM_VV, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1907,7 +1907,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEUP_VI_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, - GPR:$vl, vti.Log2SEW)>; + GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), @@ -1915,7 +1915,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEUP_VX_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, - GPR:$vl, vti.Log2SEW)>; + GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), @@ -1923,7 +1923,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, - GPR:$vl, vti.Log2SEW)>; + GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), @@ -1931,7 +1931,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, - GPR:$vl, vti.Log2SEW)>; + GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } // Predicates = [HasVInstructions] diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir +++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir @@ -141,7 +141,7 @@ ; CHECK-NEXT: $x5 = LD $x2, 0 :: (load (s64) from %stack.16) ; CHECK-NEXT: renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8) ; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.15) - ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1, implicit $vl, implicit $vtype ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: BLT killed renamable $x16, renamable $x27, %bb.2 ; CHECK-NEXT: {{ $}} @@ -209,7 +209,7 @@ dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype renamable $x13 = nsw ADDI renamable $x16, -2 renamable $v0 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8) - renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, implicit $vl, implicit $vtype + renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, 1, implicit $vl, implicit $vtype renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype BLT killed renamable $x16, renamable $x27, %bb.2 diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -482,10 +482,10 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v14, v10, a0 ; CHECK-NEXT: vslidedown.vx v12, v9, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v13, v14, 0 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v12, v10, a0 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -1451,7 +1451,7 @@ ; LMULMAX8-NEXT: vmerge.vim v16, v16, 1, v0 ; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v17, 0 -; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX8-NEXT: vslideup.vi v17, v16, 0 ; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX8-NEXT: vmsne.vi v16, v17, 0 @@ -1489,7 +1489,7 @@ ; LMULMAX4-NEXT: vmerge.vim v12, v12, 1, v0 ; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX4-NEXT: vmv.v.i v13, 0 -; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX4-NEXT: vslideup.vi v13, v12, 0 ; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX4-NEXT: vmsne.vi v12, v13, 0 @@ -1533,7 +1533,7 @@ ; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v11, 0 -; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v11, v10, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v10, v11, 0 @@ -1589,7 +1589,7 @@ ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v9, v10, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -558,7 +558,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -849,7 +849,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1164,7 +1164,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, 
mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1188,7 +1188,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1277,19 +1277,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1313,12 +1313,12 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1489,7 +1489,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1520,7 +1520,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1544,7 +1544,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1575,7 +1575,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 @@ -1583,7 +1583,7 @@ 
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 @@ -1591,7 +1591,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1622,19 +1622,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1658,12 +1658,12 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v10, (a1) @@ -1696,7 +1696,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 @@ -1704,7 +1704,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 @@ -1712,7 +1712,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 @@ -1720,7 +1720,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, 
mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 @@ -1728,7 +1728,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 10 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 @@ -1736,7 +1736,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 @@ -1744,7 +1744,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1763,7 +1763,7 @@ ; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1783,19 +1783,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 @@ -1805,19 +1805,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, 
v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 6 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1834,7 +1834,7 @@ ; LMULMAX4-NEXT: vncvt.x.x.w v14, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1849,22 +1849,22 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v11, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: vncvt.x.x.w v12, v14 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v12, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 48 ; LMULMAX1-NEXT: vse32.v v12, (a1) @@ -1880,7 +1880,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 ; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vslideup.vi v12, v16, 8 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -346,7 +346,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -362,7 +362,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -390,7 +390,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -411,7 +411,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; 
LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -440,7 +440,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -462,7 +462,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -482,7 +482,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -507,7 +507,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -526,7 +526,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -551,7 +551,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -577,7 +577,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -602,7 +602,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -117,7 +117,7 @@ ; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vmv.s.x v9, zero -; 
CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) @@ -132,7 +132,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) @@ -153,7 +153,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll @@ -164,7 +164,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v8 ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -204,19 +204,19 @@ ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -46,7 +46,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -71,7 +71,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -96,7 +96,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; 
CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -121,7 +121,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -210,7 +210,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -235,7 +235,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -468,7 +468,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -495,7 +495,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -551,7 +551,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -576,7 +576,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -601,7 +601,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -626,7 +626,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -715,7 +715,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli 
zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -740,7 +740,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -974,7 +974,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1002,7 +1002,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1059,7 +1059,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1084,7 +1084,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1134,7 +1134,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1482,7 +1482,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; 
CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1510,7 +1510,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -283,9 +283,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v8, 7 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> ret <8 x float> %s @@ -296,9 +296,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v12, v8, 6 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v12, v8, 2 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %s = shufflevector <8 x double> %x, <8 x double> poison, <8 x i32> ret <8 x double> %s @@ -309,7 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> @@ -321,9 +321,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v12, v12, 5 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v12, v8, 3 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %s = shufflevector <8 x double> %x, <8 x double> %y, <8 x i32> ret <8 x double> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -135,9 +135,8 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x float> %x to <8 x i1> @@ -169,9 +168,8 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x float> %x to <8 x i1> @@ -438,7 +436,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; 
LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9 @@ -446,7 +444,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8 @@ -454,7 +452,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse8.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -500,7 +498,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9 @@ -508,7 +506,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8 @@ -516,7 +514,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse8.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -551,7 +549,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -562,7 +560,7 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -573,9 +571,8 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x double> %x to <8 x i1> @@ -607,7 +604,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: 
vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -618,7 +615,7 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -629,9 +626,8 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x double> %x to <8 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -482,19 +482,19 @@ ; LMULMAX1-NEXT: vfncvt.f.x.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.x.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.x.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -534,19 +534,19 @@ ; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.xu.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.xu.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll @@ -28,7 +28,7 @@ ; 
RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -43,7 +43,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -60,9 +60,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -78,7 +76,7 @@ ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -93,7 +91,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -110,7 +108,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -128,7 +126,7 @@ ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -143,7 +141,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -161,7 +159,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v12, v8, 1 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -180,7 +178,7 @@ ; RV32-NEXT: vmv.v.i v12, 0 ; RV32-NEXT: vmerge.vim v12, v12, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; RV32-NEXT: vslideup.vx v12, v8, a1 ; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV32-NEXT: vand.vi v8, v12, 1 @@ -196,7 +194,7 @@ ; RV64-NEXT: vmerge.vim v12, v12, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; RV64-NEXT: vslideup.vx v12, v8, a0 ; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV64-NEXT: vand.vi v8, v12, 
1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -14,7 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -27,7 +27,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 2 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -40,7 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 6 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -53,7 +53,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, ta, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 0 ; LMULMAX2-NEXT: ret ; @@ -63,9 +63,9 @@ ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v12, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 4 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -78,7 +78,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX2-NEXT: ret ; @@ -88,9 +88,9 @@ ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v12, 8 -; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 12 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -116,7 +116,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) @@ -135,7 +135,6 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret @@ -167,7 +166,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; LMULMAX2-NEXT: 
vsetivli zero, 2, e32, m2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) @@ -179,7 +178,7 @@ ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vse32.v v9, (a0) @@ -198,7 +197,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) @@ -210,7 +209,6 @@ ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -228,7 +226,6 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret @@ -240,7 +237,6 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -283,7 +279,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0) @@ -302,7 +298,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -321,7 +317,7 @@ ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vlm.v v9, (a1) -; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; LMULMAX2-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a0) @@ -333,7 +329,7 @@ ; LMULMAX1-NEXT: vlm.v v8, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vlm.v v9, (a1) -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a0) @@ -353,7 +349,7 @@ ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vlm.v v9, (a1) -; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, ta, mu ; LMULMAX2-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a0) @@ -366,7 +362,7 @@ ; LMULMAX1-NEXT: vlm.v v8, 
(a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vlm.v v9, (a1) -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a0) @@ -392,7 +388,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -419,9 +415,8 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 4 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -437,7 +432,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp @@ -450,7 +445,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp @@ -470,7 +465,6 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 @@ -485,7 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp @@ -498,7 +492,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v0, v8, 2 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp @@ -514,7 +508,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) -; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 4 ; CHECK-NEXT: vs8r.v v8, (a2) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -14,7 +14,7 @@ ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vslide1up.vx v12, v10, a2 ; RV32-NEXT: vslide1up.vx v10, v12, a1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 3 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret @@ -24,7 +24,6 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, a1 -; RV64-NEXT: vsetvli zero, zero, 
e64, m2, tu, mu ; RV64-NEXT: vslideup.vi v8, v10, 3 ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret @@ -49,13 +48,13 @@ ; RV32-NEXT: vlse32.v v10, (a4), zero ; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV32-NEXT: vmv.s.x v10, a3 -; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 2 ; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vslide1up.vx v12, v10, a2 ; RV32-NEXT: vslide1up.vx v10, v12, a1 -; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 3, e64, m2, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 2 ; RV32-NEXT: sw a1, 16(a0) ; RV32-NEXT: sw a2, 20(a0) @@ -79,7 +78,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, a1 -; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 15, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 14 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) @@ -98,7 +97,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, a1 ; RV32-NEXT: addi a1, a2, 1 -; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV32-NEXT: vslideup.vx v8, v12, a2 ; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV32-NEXT: vse16.v v8, (a0) @@ -112,7 +111,7 @@ ; RV64-NEXT: vmv.s.x v12, a1 ; RV64-NEXT: sext.w a1, a2 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV64-NEXT: vse16.v v8, (a0) @@ -130,7 +129,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vfmv.s.f v10, fa0 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; RV32-NEXT: vslideup.vx v8, v10, a1 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vse32.v v8, (a0) @@ -143,7 +142,7 @@ ; RV64-NEXT: vfmv.s.f v10, fa0 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; RV64-NEXT: vslideup.vx v8, v10, a1 ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vse32.v v8, (a0) @@ -178,7 +177,7 @@ ; RV32-NEXT: li a2, -1 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vslideup.vx v8, v12, a1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vse64.v v8, (a0) @@ -192,7 +191,7 @@ ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vse64.v v8, (a0) @@ -227,7 +226,7 @@ ; RV32-NEXT: li a2, 6 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vslideup.vx v8, v12, a1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vse64.v v8, (a0) @@ -241,7 +240,7 @@ ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vse64.v v8, 
(a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -195,7 +195,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; RV32-NEXT: vslideup.vi v8, v9, 2 ; RV32-NEXT: lui a0, %hi(.LCPI12_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0) @@ -219,7 +219,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; RV32-NEXT: vslideup.vi v8, v9, 2 ; RV32-NEXT: lui a0, %hi(.LCPI13_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0) @@ -312,7 +312,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: vmv.v.i v9, 8 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v9, (a0) @@ -549,14 +549,13 @@ ; RV32-NEXT: vse32.v v8, (a3) ; RV32-NEXT: vse32.v v8, (a4) ; RV32-NEXT: vmv.s.x v8, zero -; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32-NEXT: vslideup.vi v9, v8, 1 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vse32.v v9, (a5) ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 3 ; RV32-NEXT: vse32.v v9, (a6) ; RV32-NEXT: ret @@ -573,14 +572,13 @@ ; RV64-NEXT: vse32.v v8, (a3) ; RV64-NEXT: vse32.v v8, (a4) ; RV64-NEXT: vmv.s.x v8, zero -; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vse32.v v9, (a5) ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 -; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 3 ; RV64-NEXT: vse32.v v9, (a6) ; RV64-NEXT: ret @@ -612,13 +610,12 @@ ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v10, (a5) ; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vse16.v v9, (a6) ; CHECK-NEXT: ret @@ -755,7 +752,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: vsetivli zero, 7, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret ret <16 x i8> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -217,7 +217,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli 
zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -192,41 +192,37 @@ ; RV32-NEXT: li a0, 5 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vmv.s.x v16, a0 -; RV32-NEXT: vmv.v.i v20, 2 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu -; RV32-NEXT: vslideup.vi v20, v16, 7 ; RV32-NEXT: lui a0, %hi(.LCPI11_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0) -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; RV32-NEXT: vle16.v v21, (a0) +; RV32-NEXT: vle16.v v20, (a0) +; RV32-NEXT: vmv.v.i v21, 2 +; RV32-NEXT: vslideup.vi v21, v16, 7 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v16, v8, v21 +; RV32-NEXT: vrgatherei16.vv v16, v8, v20 ; RV32-NEXT: li a0, 164 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v16, v12, v20, v0.t +; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: li a0, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vmv.v.i v20, 2 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; RV64-NEXT: vslideup.vi v20, v16, 7 ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0) -; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; RV64-NEXT: vle64.v v24, (a0) -; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vle64.v v20, (a0) +; RV64-NEXT: li a0, 5 +; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vmv.v.i v24, 2 +; RV64-NEXT: vslideup.vi v24, v16, 7 +; RV64-NEXT: vrgather.vv v16, v8, v20 ; RV64-NEXT: li a0, 164 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t +; RV64-NEXT: vrgather.vv v16, v12, v24, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> @@ -362,7 +358,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v9, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -463,10 +459,9 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vmv.v.i v11, 0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v11, v10, 2 ; CHECK-NEXT: li a0, 70 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 2 @@ -484,7 +479,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v11, v10, 5 ; RV32-NEXT: lui a0, 8256 ; RV32-NEXT: addi a0, a0, 2 @@ -504,7 +499,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.s.x 
v10, a0 ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 6, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v11, v10, 5 ; RV64-NEXT: lui a0, 8256 ; RV64-NEXT: addiw a0, a0, 2 @@ -526,7 +521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 3 @@ -582,9 +577,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v9, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 6 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %s = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> ret <8 x i16> %s @@ -595,9 +590,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v10, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v8, 3 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> ret <8 x i32> %s @@ -608,7 +603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> @@ -620,7 +615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -949,7 +949,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, 3 ; CHECK-NEXT: vmerge.vim v10, v10, 2, v0 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v10, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v11, 0 @@ -957,7 +957,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmv.s.x v12, a1 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v11, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: lui a1, %hi(.LCPI53_0) @@ -985,7 +985,7 @@ ; CHECK-NEXT: lui a1, 524288 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v10, v9, 2 ; CHECK-NEXT: lui a1, %hi(.LCPI54_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0) @@ -998,9 +998,7 @@ ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 2 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 3 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: 
vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -1215,7 +1213,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v8, a1 ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; RV32-NEXT: vslideup.vi v9, v8, 2 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsra.vv v8, v10, v9 @@ -4209,7 +4207,7 @@ ; LMULMAX1-RV32-NEXT: lui a2, 524288 ; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2 ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI131_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI131_0) @@ -4222,9 +4220,7 @@ ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2 -; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 3 -; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13 ; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 @@ -4271,7 +4267,7 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v12, a1 ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vslideup.vi v14, v12, 5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14 @@ -4293,7 +4289,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 63 ; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 -; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI132_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI132_0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -667,7 +667,6 @@ ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: ret ; @@ -688,7 +687,6 @@ ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: ret ; @@ -790,16 +788,13 @@ ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: lui a0, 945060 ; RV32-LMULMAX4-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-LMULMAX4-NEXT: vmv.s.x v9, a0 ; RV32-LMULMAX4-NEXT: lui a0, 551776 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v8, v9, 1 ; RV32-LMULMAX4-NEXT: ret ; @@ -823,19 +818,17 @@ ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; 
RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: lui a0, 551776 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 2 ; RV32-LMULMAX8-NEXT: lui a0, 945060 ; RV32-LMULMAX8-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3 ; RV32-LMULMAX8-NEXT: ret ; @@ -848,7 +841,6 @@ ; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a1 -; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -33,7 +33,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -53,7 +53,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -32,7 +32,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -53,7 +53,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: 
vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -77,7 +77,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -99,7 +99,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -120,7 +120,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -2178,9 +2178,9 @@ ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: li a0, 32 -; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; RV64-NEXT: vslideup.vi v12, v10, 16 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs %v = call <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*> %ptrs, i32 2, <32 x i1> %m, <32 x i8> %passthru) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -56,7 +56,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -87,7 +87,6 @@ ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vmv.s.x v8, a0 -; RV32-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 1 ; RV32-NEXT: .LBB4_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 @@ -103,7 +102,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -134,7 +133,6 @@ ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: .LBB4_4: # 
%else2 ; RV64-NEXT: vmv1r.v v8, v9 @@ -156,7 +154,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -173,7 +171,7 @@ ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vslide1up.vx v11, v10, a2 ; RV32-NEXT: vslide1up.vx v12, v11, a1 -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslideup.vi v9, v12, 0 ; RV32-NEXT: .LBB5_2: # %else ; RV32-NEXT: andi a0, a0, 2 @@ -187,7 +185,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32-NEXT: vslide1up.vx v8, v10, a1 ; RV32-NEXT: vslide1up.vx v10, v8, a0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vslideup.vi v9, v10, 1 ; RV32-NEXT: .LBB5_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 @@ -203,7 +201,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -233,7 +231,7 @@ ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vmv.s.x v8, a0 -; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: .LBB5_4: # %else2 ; RV64-NEXT: vmv1r.v v8, v9 @@ -255,7 +253,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -332,7 +330,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v12, 0 -; RV64-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v12, v9, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v9, v12, 0 @@ -415,7 +413,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -460,7 +458,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -514,7 +512,7 @@ ; RV32-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV32-NEXT: vslideup.vi v9, v8, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v8, v9, 0 @@ -556,10 +554,8 @@ ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, zero, e32, 
mf2, tu, mu ; RV32-NEXT: vslideup.vi v8, v9, 1 ; RV32-NEXT: .LBB8_4: # %else2 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -574,7 +570,7 @@ ; RV64-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v9, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; RV64-NEXT: vslideup.vi v9, v8, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v8, v9, 0 @@ -616,10 +612,8 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV64-NEXT: vslideup.vi v8, v9, 1 ; RV64-NEXT: .LBB8_4: # %else2 -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vse32.v v8, (a1) ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -642,7 +636,7 @@ ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v9, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v9, v10, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -311,7 +311,7 @@ ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: li a0, 32 -; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; RV64-NEXT: vslideup.vi v8, v12, 16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) @@ -76,7 +76,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) @@ -214,7 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) @@ -227,7 +227,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) @@ -239,7 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v11, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) @@ -251,7 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) @@ -264,7 +264,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) @@ -278,7 +278,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) @@ -293,7 +293,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) @@ -307,7 +307,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) @@ -321,7 +321,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v9, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 15) @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) @@ -346,7 +346,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) @@ -359,7 +359,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v14, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) @@ -395,7 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) @@ -408,7 +408,7 @@ ; 
CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v0, v8, a0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 8) @@ -427,7 +427,7 @@ ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v9, v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; 
CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f 
v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll @@ -9,7 +9,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -27,7 +27,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: 
vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -44,7 +44,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -62,7 +62,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -79,7 +79,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -97,7 +97,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -114,7 +114,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -132,7 +132,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -149,7 +149,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v10, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 @@ -167,7 +167,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v10, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 @@ -184,7 +184,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v12, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -202,7 +202,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v12, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -219,7 +219,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, 
e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vi v16, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 @@ -237,7 +237,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vx v16, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -170,7 +170,7 @@ ; 
CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; 
CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -542,7 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -555,7 +555,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -577,7 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -590,7 +590,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -612,7 +612,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -625,7 +625,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -639,7 +639,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -653,7 +653,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -668,7 +668,7 @@ ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -682,7 +682,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -696,7 +696,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -711,7 +711,7 @@ ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -725,7 +725,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi 
v8, v12, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -739,7 +739,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -754,7 +754,7 @@ ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -768,7 +768,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -782,7 +782,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -797,7 +797,7 @@ ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -822,7 +822,7 @@ ; CHECK-NEXT: li a0, 10 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 3 @@ -836,7 +836,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 %idx @@ -860,7 +860,7 @@ ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 3 @@ -874,7 +874,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu ; 
CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, 
e8, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = 
insertelement %v, i16 %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -542,7 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -555,7 +555,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -577,7 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -590,7 +590,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -612,7 +612,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -625,7 +625,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, 
m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -647,7 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -661,7 +661,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -683,7 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -697,7 +697,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -719,7 +719,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -733,7 +733,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -755,7 +755,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -769,7 +769,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -1146,20 +1146,19 @@ , iXLen, iXLen); -define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, iXLen %1, %2, iXLen %3) nounwind { +define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vslideup.vx v9, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslideup.mask.nxv1i8( - undef, %0, - iXLen %1, - %2, - iXLen %3, iXLen 3) + %1, + iXLen %2, + %3, + iXLen %4, iXLen 3) ret %a } 
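Note on the shape of the updated intrinsic (a minimal sketch for readers of the test churn above, not part of the patch itself): the unmasked vslideup/vslidedown intrinsics now carry a trailing policy operand, which is why the tests switch the preceding vsetvli from tail-undisturbed (tu) to tail-agnostic (ta). The sketch below assumes the nxv1i8 variant on RV64 (so iXLen is i64); the function name slideup_ta_example and its argument names are illustrative only, and the exact intrinsic name mangling follows the clang-generated calls elsewhere in this patch.

; Sketch of the post-patch unmasked vslideup intrinsic: the fifth operand is
; the policy. The unmasked tests in this patch pass 1 here (tail agnostic,
; mask undisturbed), matching the "ta" vsetvli in the updated CHECK lines.
declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
  <vscale x 1 x i8>,  ; dest/merge vector
  <vscale x 1 x i8>,  ; source vector
  i64,                ; slide offset
  i64,                ; vl
  i64)                ; policy operand (new in this patch)

define <vscale x 1 x i8> @slideup_ta_example(<vscale x 1 x i8> %dst,
                                             <vscale x 1 x i8> %src,
                                             i64 %offset, i64 %vl) {
  %r = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
      <vscale x 1 x i8> %dst, <vscale x 1 x i8> %src,
      i64 %offset, i64 %vl, i64 1)   ; 1 = tail agnostic, mask undisturbed
  ret <vscale x 1 x i8> %r
}

The masked form exercised in masked-tama.ll above passes policy 3 instead, i.e. tail agnostic and mask agnostic, which is what lets the masked test keep a plain "ta, ma" vsetvli with no merge copy.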
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll @@ -2521,7 +2521,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a0, a0, 3 ; RV32-NEXT: add a1, a0, a0 -; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; RV32-NEXT: vslideup.vx v0, v24, a0 ; RV32-NEXT: ret ; @@ -2534,7 +2534,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a0, a0, 3 ; RV64-NEXT: add a1, a0, a0 -; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; RV64-NEXT: vslideup.vx v0, v24, a0 ; RV64-NEXT: ret %vc = fcmp oeq %va, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -3201,7 +3201,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: vmseq.vi v0, v8, 0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 ; CHECK-NEXT: ret %vc = icmp eq %va, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll @@ -867,6 +867,7 @@ , , iXLen, + iXLen, iXLen); define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { @@ -880,7 +881,8 @@ undef, %0, iXLen %1, - iXLen %2) + iXLen %2, + iXLen 1) ret %a } @@ -889,21 +891,22 @@ , , iXLen, + iXLen, iXLen); -define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { +define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vslideup.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslideup.nxv1i8( - undef, %0, - iXLen %1, - iXLen %2) + %1, + iXLen %2, + iXLen %3, + iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -21,7 +21,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -1) @@ -36,7 +36,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -2) @@ -51,7 +51,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, 
i32 1) @@ -76,7 +76,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -1) @@ -91,7 +91,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -4) @@ -106,7 +106,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 3) @@ -131,7 +131,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 -1) @@ -146,7 +146,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 -8) @@ -161,7 +161,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 7) @@ -185,7 +185,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -1) @@ -199,7 +199,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -16) @@ -213,7 +213,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 15) @@ -238,7 +238,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call 
@llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -1) @@ -254,7 +254,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -32) @@ -269,7 +269,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 31) @@ -294,7 +294,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -1) @@ -310,7 +310,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -64) @@ -326,7 +326,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 63) @@ -351,7 +351,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -1) @@ -367,7 +367,7 @@ ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -128) @@ -383,7 +383,7 @@ ; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 127) @@ -408,7 +408,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -1) @@ -423,7 +423,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret 
%res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -2) @@ -438,7 +438,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 1) @@ -463,7 +463,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -1) @@ -478,7 +478,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -4) @@ -493,7 +493,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 3) @@ -518,7 +518,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -1) @@ -533,7 +533,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -8) @@ -548,7 +548,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 7) @@ -572,7 +572,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -1) @@ -586,7 +586,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -16) @@ -600,7 +600,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; 
CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 15) @@ -625,7 +625,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -1) @@ -641,7 +641,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -32) @@ -656,7 +656,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 31) @@ -681,7 +681,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -1) @@ -697,7 +697,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -64) @@ -713,7 +713,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 63) @@ -738,7 +738,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -1) @@ -753,7 +753,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -2) @@ -768,7 +768,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 1) @@ -793,7 +793,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; 
CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -1) @@ -808,7 +808,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -4) @@ -823,7 +823,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 3) @@ -848,7 +848,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -1) @@ -863,7 +863,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -8) @@ -878,7 +878,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 7) @@ -902,7 +902,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -1) @@ -916,7 +916,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -16) @@ -930,7 +930,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 15) @@ -955,7 +955,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -1) @@ -971,7 +971,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; 
CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -32) @@ -986,7 +986,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 31) @@ -1011,7 +1011,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -1) @@ -1026,7 +1026,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -2) @@ -1041,7 +1041,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 1) @@ -1066,7 +1066,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -1) @@ -1081,7 +1081,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -4) @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 3) @@ -1121,7 +1121,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -1) @@ -1136,7 +1136,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -8) @@ -1151,7 +1151,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 7) @@ -1175,7 +1175,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -1) @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -16) @@ -1203,7 +1203,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 15) @@ -1228,7 +1228,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -1) @@ -1243,7 +1243,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -2) @@ -1258,7 +1258,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 1) @@ -1283,7 +1283,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -1) @@ -1298,7 +1298,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -4) @@ -1313,7 +1313,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, 
i32 3) @@ -1338,7 +1338,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -1) @@ -1353,7 +1353,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -8) @@ -1368,7 +1368,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 7) @@ -1392,7 +1392,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -1) @@ -1406,7 +1406,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -16) @@ -1420,7 +1420,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 15) @@ -1445,7 +1445,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -1) @@ -1461,7 +1461,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -32) @@ -1476,7 +1476,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 31) @@ -1501,7 +1501,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; 
CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -1) @@ -1517,7 +1517,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -64) @@ -1533,7 +1533,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 63) @@ -1558,7 +1558,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -1) @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -2) @@ -1588,7 +1588,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 1) @@ -1613,7 +1613,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -1) @@ -1628,7 +1628,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -4) @@ -1643,7 +1643,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 3) @@ -1668,7 +1668,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -1) @@ -1683,7 +1683,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, 
zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -8) @@ -1698,7 +1698,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 7) @@ -1722,7 +1722,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -1) @@ -1736,7 +1736,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -16) @@ -1750,7 +1750,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 15) @@ -1775,7 +1775,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -1) @@ -1791,7 +1791,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -32) @@ -1806,7 +1806,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 31) @@ -1831,7 +1831,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -1) @@ -1846,7 +1846,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -2) @@ -1861,7 +1861,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; 
CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 1) @@ -1886,7 +1886,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -1) @@ -1901,7 +1901,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -4) @@ -1916,7 +1916,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 3) @@ -1941,7 +1941,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -1) @@ -1956,7 +1956,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -8) @@ -1971,7 +1971,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 7) @@ -1995,7 +1995,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -1) @@ -2009,7 +2009,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -16) @@ -2023,7 +2023,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 15) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll 
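The vslidedown-rv32.ll and vslidedown-rv64.ll updates that follow all apply one pattern: each unmasked declaration gains a trailing policy operand (i32 on RV32, i64 on RV64), every call site passes 1 for it, and the expected vsetvli flips from tail-undisturbed (tu, mu) to tail-agnostic (ta, mu). A minimal before/after sketch of one RV64 case is given below; the hunk context elides the vector types and intrinsic names, so the <vscale x 1 x i8> operands and the .nxv1i8.i64 mangling are reconstructed by hand and may not match the file verbatim.

; Before: four operands (dest, src, offset, vl); codegen was checked with tu, mu.
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64)

; After: a fifth, policy operand is appended. These tests pass 1 for it, and
; the expected vsetvli becomes tail-agnostic.
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64, i64)

%a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(
  <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3, i64 1)
; CHECK: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0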
b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll @@ -5,12 +5,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -18,7 +20,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -50,7 +53,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -58,7 +61,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -84,12 +88,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -97,7 +103,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -129,7 +136,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -137,7 +144,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -163,12 +171,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -176,7 +186,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -208,7 +219,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -216,7 +227,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -242,12 +254,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -255,7 +269,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -287,7 +302,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -295,7 +310,8 @@ %0, %1, i32 
9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -321,12 +337,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -334,7 +352,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -366,7 +385,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -374,7 +393,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -400,12 +420,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -413,7 +435,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -445,7 +468,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -453,7 +476,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -479,12 +503,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -492,7 +518,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -524,7 +551,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -532,7 +559,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -558,12 +586,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -571,7 +601,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -603,7 +634,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -611,7 +642,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -637,12 +669,14 @@ 
, , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -650,7 +684,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -682,7 +717,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -690,7 +725,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -716,12 +752,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +767,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -761,7 +800,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -769,7 +808,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -795,12 +835,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -808,7 +850,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -840,7 +883,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -848,7 +891,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -874,12 +918,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -887,7 +933,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -919,7 +966,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -927,7 +974,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -953,12 +1001,14 @@ , , i32, - i32); + i32, + i32 +); define 
@intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +1016,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -998,7 +1049,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1006,7 +1057,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1032,12 +1084,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1045,7 +1099,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1077,7 +1132,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1085,7 +1140,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1111,12 +1167,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1124,7 +1182,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1156,7 +1215,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1164,7 +1223,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1190,12 +1250,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1203,7 +1265,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1235,7 +1298,7 @@ define @intrinsic_vslidedown_vi_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1243,7 +1306,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1269,12 +1333,14 @@ , , i32, - i32); + i32, + i32 +); define 
@intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1348,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1314,7 +1381,7 @@ define @intrinsic_vslidedown_vi_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1322,7 +1389,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1348,12 +1416,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1361,7 +1431,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1393,7 +1464,7 @@ define @intrinsic_vslidedown_vi_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1401,7 +1472,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1427,12 +1499,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1440,7 +1514,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1472,7 +1547,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1480,7 +1555,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1506,12 +1582,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1519,7 +1597,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1551,7 +1630,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1559,7 +1638,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1585,12 +1665,14 @@ , , i32, - i32); + i32, + i32 +); define 
@intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1598,7 +1680,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1630,7 +1713,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1638,7 +1721,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1664,12 +1748,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1677,7 +1763,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1709,7 +1796,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1717,7 +1804,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1743,12 +1831,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1756,7 +1846,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1788,7 +1879,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1796,7 +1887,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1822,12 +1914,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1835,7 +1929,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1867,7 +1962,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1970,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1901,12 +1997,14 @@ , , i32, - i32); + i32, + i32 +); define 
@intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1914,7 +2012,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1946,7 +2045,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1954,7 +2053,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1980,12 +2080,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1993,7 +2095,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2025,7 +2128,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2033,7 +2136,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2059,12 +2163,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2072,7 +2178,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2104,7 +2211,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2219,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2138,12 +2246,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2151,7 +2261,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2183,7 +2294,7 @@ define @intrinsic_vslidedown_vi_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2191,7 +2302,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2217,12 +2329,14 @@ , , i32, - i32); + i32, + i32 +); define 
@intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2230,7 +2344,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2262,7 +2377,7 @@ define @intrinsic_vslidedown_vi_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2270,7 +2385,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2296,12 +2412,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2309,7 +2427,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2341,7 +2460,7 @@ define @intrinsic_vslidedown_vi_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2468,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll @@ -5,12 +5,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -18,7 +20,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -50,7 +53,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -58,7 +61,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -84,12 +88,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -97,7 +103,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -129,7 +136,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret 
entry: @@ -137,7 +144,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -163,12 +171,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -176,7 +186,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -208,7 +219,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -216,7 +227,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -242,12 +254,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -255,7 +269,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -287,7 +302,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -295,7 +310,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -321,12 +337,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -334,7 +352,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -366,7 +385,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -374,7 +393,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -400,12 +420,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -413,7 +435,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -445,7 +468,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -453,7 +476,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -479,12 
+503,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -492,7 +518,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -524,7 +551,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -532,7 +559,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -558,12 +586,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -571,7 +601,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -603,7 +634,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -611,7 +642,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -637,12 +669,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -650,7 +684,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -682,7 +717,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -690,7 +725,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -716,12 +752,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +767,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -761,7 +800,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -769,7 +808,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -795,12 +835,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -808,7 +850,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -840,7 +883,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -848,7 +891,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -874,12 +918,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -887,7 +933,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -919,7 +966,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -927,7 +974,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -953,12 +1001,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +1016,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -998,7 +1049,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1006,7 +1057,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1032,12 +1084,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1045,7 +1099,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1077,7 +1132,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1085,7 +1140,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1111,12 +1167,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1124,7 +1182,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1156,7 +1215,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1164,7 +1223,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1190,12 +1250,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1203,7 +1265,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1235,7 +1298,7 @@ define @intrinsic_vslidedown_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1243,7 +1306,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1269,12 +1333,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1348,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1314,7 +1381,7 @@ define @intrinsic_vslidedown_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1322,7 +1389,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1348,12 +1416,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1361,7 +1431,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1393,7 +1464,7 @@ define @intrinsic_vslidedown_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1401,7 +1472,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1427,12 +1499,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1440,7 +1514,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1472,7 +1547,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1480,7 +1555,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1506,12 +1582,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1519,7 +1597,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1551,7 +1630,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1559,7 +1638,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1585,12 +1665,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1598,7 +1680,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1630,7 +1713,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1638,7 +1721,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1664,12 +1748,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1677,7 +1763,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1709,7 +1796,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1717,7 +1804,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1743,12 +1831,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1756,7 +1846,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1788,7 +1879,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1796,7 +1887,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1822,12 +1914,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1835,7 +1929,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1867,7 +1962,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1970,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1901,12 +1997,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1914,7 +2012,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1946,7 +2045,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1954,7 +2053,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1980,12 +2080,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1993,7 +2095,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2025,7 +2128,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2033,7 +2136,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2059,12 +2163,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2072,7 +2178,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2104,7 +2211,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2219,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2138,12 +2246,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2151,7 +2261,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2183,7 +2294,7 @@ define @intrinsic_vslidedown_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2191,7 +2302,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2217,12 +2329,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2230,7 +2344,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2262,7 +2377,7 @@ define @intrinsic_vslidedown_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2270,7 +2385,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2296,12 +2412,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2309,7 +2427,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2341,7 +2460,7 @@ define @intrinsic_vslidedown_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2468,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll 
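The vslideup-rv32.ll changes that follow mirror the vslidedown updates: the same trailing policy operand is appended to every declaration, each call passes 1, and the expected vsetvli moves from tu, mu to ta, mu. A rough sketch of the immediate-offset form after the change, again with the elided vector types and mangling reconstructed by hand:

; Immediate form with the extra policy operand; the <vscale x 1 x i8> types
; and .nxv1i8.i32 mangling here are assumptions, not copied from the file.
%a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i32(
  <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 9, i32 %2, i32 1)
; CHECK: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9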
b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll @@ -5,12 +5,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -18,7 +20,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -50,7 +53,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -58,7 +61,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -84,12 +88,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -97,7 +103,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -129,7 +136,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -137,7 +144,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -163,12 +171,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -176,7 +186,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -208,7 +219,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -216,7 +227,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -242,12 +254,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -255,7 +269,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -287,7 +302,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -295,7 +310,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -321,12 
+337,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -334,7 +352,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -366,7 +385,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -374,7 +393,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -400,12 +420,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -413,7 +435,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -445,7 +468,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -453,7 +476,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -479,12 +503,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -492,7 +518,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -524,7 +551,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -532,7 +559,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -558,12 +586,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -571,7 +601,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -603,7 +634,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -611,7 +642,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -637,12 +669,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, 
i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -650,7 +684,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -682,7 +717,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -690,7 +725,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -716,12 +752,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +767,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -761,7 +800,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -769,7 +808,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -795,12 +835,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -808,7 +850,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -840,7 +883,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -848,7 +891,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -874,12 +918,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -887,7 +933,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -919,7 +966,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -927,7 +974,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -953,12 +1001,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +1016,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -998,7 +1049,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1006,7 +1057,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1032,12 +1084,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1045,7 +1099,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1077,7 +1132,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1085,7 +1140,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1111,12 +1167,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1124,7 +1182,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1156,7 +1215,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1164,7 +1223,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1190,12 +1250,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1203,7 +1265,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1235,7 +1298,7 @@ define @intrinsic_vslideup_vi_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1243,7 +1306,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1269,12 +1333,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1348,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1314,7 +1381,7 @@ define @intrinsic_vslideup_vi_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1322,7 +1389,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1348,12 +1416,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1361,7 +1431,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1393,7 +1464,7 @@ define @intrinsic_vslideup_vi_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1401,7 +1472,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1427,12 +1499,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1440,7 +1514,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1472,7 +1547,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1480,7 +1555,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1506,12 +1582,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1519,7 +1597,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1551,7 +1630,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1559,7 +1638,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1585,12 +1665,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; 
CHECK-NEXT: ret entry: @@ -1598,7 +1680,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1630,7 +1713,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1638,7 +1721,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1664,12 +1748,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1677,7 +1763,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1709,7 +1796,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1717,7 +1804,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1743,12 +1831,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1756,7 +1846,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1788,7 +1879,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1796,7 +1887,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1822,12 +1914,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1835,7 +1929,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -1867,7 +1962,7 @@ define @intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1970,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1901,12 +1997,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1914,7 +2012,8 @@ %0, %1, i32 %2, - i32 
%3) + i32 %3, + i32 1) ret %a } @@ -1946,7 +2045,7 @@ define @intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1954,7 +2053,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -1980,12 +2080,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1993,7 +2095,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2025,7 +2128,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2033,7 +2136,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2059,12 +2163,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2072,7 +2178,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2104,7 +2211,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2219,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2138,12 +2246,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2151,7 +2261,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2183,7 +2294,7 @@ define @intrinsic_vslideup_vi_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2191,7 +2302,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2217,12 +2329,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2230,7 +2344,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2262,7 +2377,7 @@ define 
@intrinsic_vslideup_vi_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2270,7 +2385,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } @@ -2296,12 +2412,14 @@ , , i32, - i32); + i32, + i32 +); define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2309,7 +2427,8 @@ %0, %1, i32 %2, - i32 %3) + i32 %3, + i32 1) ret %a } @@ -2341,7 +2460,7 @@ define @intrinsic_vslideup_vi_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2468,8 @@ %0, %1, i32 9, - i32 %2) + i32 %2, + i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll @@ -5,12 +5,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -18,7 +20,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -50,7 +53,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -58,7 +61,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -84,12 +88,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -97,7 +103,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -129,7 +136,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -137,7 +144,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -163,12 +171,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -176,7 +186,8 @@ %0, %1, 
i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -208,7 +219,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -216,7 +227,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -242,12 +254,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -255,7 +269,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -287,7 +302,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -295,7 +310,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -321,12 +337,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -334,7 +352,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -366,7 +385,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -374,7 +393,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -400,12 +420,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -413,7 +435,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -445,7 +468,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -453,7 +476,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -479,12 +503,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -492,7 +518,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -524,7 +551,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -532,7 +559,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -558,12 +586,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -571,7 +601,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -603,7 +634,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -611,7 +642,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -637,12 +669,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -650,7 +684,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -682,7 +717,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -690,7 +725,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -716,12 +752,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +767,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -761,7 +800,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -769,7 +808,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -795,12 +835,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -808,7 +850,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -840,7 +883,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -848,7 +891,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -874,12 +918,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -887,7 +933,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -919,7 +966,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -927,7 +974,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -953,12 +1001,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +1016,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -998,7 +1049,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1006,7 +1057,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1032,12 +1084,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1045,7 +1099,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1077,7 +1132,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1085,7 +1140,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1111,12 +1167,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1124,7 +1182,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1156,7 +1215,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, 
ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1164,7 +1223,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1190,12 +1250,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1203,7 +1265,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1235,7 +1298,7 @@ define @intrinsic_vslideup_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1243,7 +1306,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1269,12 +1333,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1348,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1314,7 +1381,7 @@ define @intrinsic_vslideup_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1322,7 +1389,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1348,12 +1416,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1361,7 +1431,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1393,7 +1464,7 @@ define @intrinsic_vslideup_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1401,7 +1472,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1427,12 +1499,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1440,7 +1514,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1472,7 +1547,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ 
-1480,7 +1555,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1506,12 +1582,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1519,7 +1597,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1551,7 +1630,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1559,7 +1638,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1585,12 +1665,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1598,7 +1680,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1630,7 +1713,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1638,7 +1721,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1664,12 +1748,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1677,7 +1763,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1709,7 +1796,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1717,7 +1804,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1743,12 +1831,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1756,7 +1846,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1788,7 +1879,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1796,7 +1887,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret 
%a } @@ -1822,12 +1914,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1835,7 +1929,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1867,7 +1962,7 @@ define @intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1970,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1901,12 +1997,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1914,7 +2012,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -1946,7 +2045,7 @@ define @intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1954,7 +2053,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -1980,12 +2080,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1993,7 +2095,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2025,7 +2128,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2033,7 +2136,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2059,12 +2163,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2072,7 +2178,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2104,7 +2211,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2219,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2138,12 +2246,14 @@ , , i64, - i64); + i64, + i64 +); define 
@intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2151,7 +2261,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2183,7 +2294,7 @@ define @intrinsic_vslideup_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2191,7 +2302,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2217,12 +2329,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2230,7 +2344,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2262,7 +2377,7 @@ define @intrinsic_vslideup_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2270,7 +2385,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a } @@ -2296,12 +2412,14 @@ , , i64, - i64); + i64, + i64 +); define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2309,7 +2427,8 @@ %0, %1, i64 %2, - i64 %3) + i64 %3, + i64 1) ret %a } @@ -2341,7 +2460,7 @@ define @intrinsic_vslideup_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2468,8 @@ %0, %1, i64 9, - i64 %2) + i64 %2, + i64 1) ret %a }
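
For reference, the test hunks above are all of the same shape: each vslideup/vslidedown declare gains one trailing policy operand, every unmasked call site passes 1 for it, and the expected vsetvli in the CHECK lines accordingly switches from tu, mu to ta, mu. The sketch below shows one such updated declare/call pair as a minimal example; the element type, the exact intrinsic name mangling, and the %dst/%src/%off/%vl value names are illustrative assumptions, since the vector types are elided in this rendering of the diff.

; Minimal sketch of the post-patch intrinsic shape (assumed nxv4f16 element type
; and .i64 XLEN suffix; names chosen for illustration only).
declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(
  <vscale x 4 x half>,   ; dest
  <vscale x 4 x half>,   ; source
  i64,                   ; offset
  i64,                   ; vl
  i64)                   ; policy operand added by this patch

define <vscale x 4 x half> @example_vslideup(<vscale x 4 x half> %dst,
                                             <vscale x 4 x half> %src,
                                             i64 %off, i64 %vl) nounwind {
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(
      <vscale x 4 x half> %dst,
      <vscale x 4 x half> %src,
      i64 %off,
      i64 %vl,
      i64 1)             ; 1 = tail agnostic, matching the tu -> ta change in the CHECK lines
  ret <vscale x 4 x half> %a
}

With the policy operand set to 1, the expected lowering selects a tail-agnostic vsetvli (ta, mu) before the vslideup, which is exactly the pattern asserted by the updated CHECK-NEXT lines in these files.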