diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -459,10 +459,21 @@ let HasMaskedOffOperand = false; } +let UnMaskedPolicyScheme = HasPolicyOperand, + HasMaskedOffOperand = false, + IsPrototypeDefaultTU = false in { + multiclass RVVSlideUpBuiltinSet { + defm "" : RVVOutBuiltinSet; + defm "" : RVVOutBuiltinSet; + } +} + let UnMaskedPolicyScheme = HasPolicyOperand, HasMaskedOffOperand = false, IsPrototypeDefaultTU = true in { - multiclass RVVSlideBuiltinSet { + multiclass RVVSlideDownBuiltinSet { defm "" : RVVOutBuiltinSet; defm "" : RVVOutBuiltinSet @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -67,7 +67,7 @@ // 
CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset, @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset, @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset, @@ -127,7 +127,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset, @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset, @@ -157,7 +157,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset, @@ -167,7 +167,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset, @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset, @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset, @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset, @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -247,7 +247,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -257,7 +257,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset, @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset, @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset, @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset, @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -347,7 +347,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -397,7 +397,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -407,7 +407,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -417,7 +417,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -437,7 +437,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -487,7 +487,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -497,7 +497,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( 
[[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -507,7 +507,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -517,7 +517,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -527,7 +527,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset, @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset, @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset, @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset, @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset, @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset, @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset, @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset, @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset, @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset, @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset, @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset, @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset, @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset, @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -508,7 +508,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
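
For context (not part of the patch): a minimal sketch of how one of these builtins is used and where the changed operand shows up in the lowered IR. The wrapper function name and the comment wording are illustrative assumptions; the builtin name follows the pre-`__riscv_` intrinsic naming that these tests themselves use, and the policy values refer to LLVM's RISC-V vector policy encoding (0 = tail/mask undisturbed, 1 = TAIL_AGNOSTIC).

```c
// Illustrative only -- not part of the diff. Assumes <riscv_vector.h> and a
// compiler providing the RVV intrinsics under the naming used by these tests.
#include <riscv_vector.h>
#include <stddef.h>

// Slides src up by `offset` elements into dst: dst[0..offset) keeps its old
// values, dst[offset..vl) receives src[0..vl-offset).
vint8m1_t slide_example(vint8m1_t dst, vint8m1_t src, size_t offset, size_t vl) {
  // Lowers to @llvm.riscv.vslideup.nxv8i8.i64(dst, src, offset, vl, POLICY).
  // The updated CHECK lines above expect the trailing POLICY immediate to be
  // 1 (TAIL_AGNOSTIC) instead of the previous 0 for the unmasked builtin.
  return vslideup_vx_i8m1(dst, src, offset, vl);
}
```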